# lib/bootstrap.py @ 952d7515
#
#

# Copyright (C) 2006, 2007, 2008, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Functions to bootstrap a new cluster.

"""

import os
import os.path
import re
import logging
import time

from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import hypervisor
from ganeti import bdev
from ganeti import netutils
from ganeti import backend

# ec_id for InitConfig's temporary reservation manager
_INITCONF_ECID = "initconfig-ecid"


def _InitSSHSetup():
  """Set up the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  utils.AddAuthorizedKey(auth_keys, utils.ReadFile(pub_key))


def GenerateHmacKey(file_name):
  """Writes a new HMAC key.

  @type file_name: str
  @param file_name: Path to output file

  """
  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,
                  backup=True)
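
# Example usage (the path below is illustrative only, not a Ganeti constant):
#   GenerateHmacKey("/var/lib/ganeti/hmac.key")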


def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_confd_hmac_key,
                          new_cds, rapi_cert_pem=None, cds=None,
                          nodecert_file=constants.NODED_CERT_FILE,
                          rapicert_file=constants.RAPI_CERT_FILE,
                          hmackey_file=constants.CONFD_HMAC_KEY,
                          cds_file=constants.CLUSTER_DOMAIN_SECRET_FILE):
  """Updates the cluster certificates, keys and secrets.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @type new_cds: bool
  @param new_cds: Whether to generate a new cluster domain secret
  @type rapi_cert_pem: string
  @param rapi_cert_pem: New RAPI certificate in PEM format
  @type cds: string
  @param cds: New cluster domain secret
  @type nodecert_file: string
  @param nodecert_file: optional override of the node cert file path
  @type rapicert_file: string
  @param rapicert_file: optional override of the rapi cert file path
  @type hmackey_file: string
  @param hmackey_file: optional override of the hmac key file path
  @type cds_file: string
  @param cds_file: optional override of the cluster domain secret file path

  """
  # noded SSL certificate
  cluster_cert_exists = os.path.exists(nodecert_file)
  if new_cluster_cert or not cluster_cert_exists:
    if cluster_cert_exists:
      utils.CreateBackup(nodecert_file)

    logging.debug("Generating new cluster certificate at %s", nodecert_file)
    utils.GenerateSelfSignedSslCert(nodecert_file)

  # confd HMAC key
  if new_confd_hmac_key or not os.path.exists(hmackey_file):
    logging.debug("Writing new confd HMAC key to %s", hmackey_file)
    GenerateHmacKey(hmackey_file)

  # RAPI
  rapi_cert_exists = os.path.exists(rapicert_file)

  if rapi_cert_pem:
    # Assume rapi_cert_pem contains a valid PEM-formatted certificate and key
    logging.debug("Writing RAPI certificate at %s", rapicert_file)
    utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)

  elif new_rapi_cert or not rapi_cert_exists:
    if rapi_cert_exists:
      utils.CreateBackup(rapicert_file)

    logging.debug("Generating new RAPI certificate at %s", rapicert_file)
    utils.GenerateSelfSignedSslCert(rapicert_file)

  # Cluster domain secret
  if cds:
    logging.debug("Writing cluster domain secret to %s", cds_file)
    utils.WriteFile(cds_file, data=cds, backup=True)

  elif new_cds or not os.path.exists(cds_file):
    logging.debug("Generating new cluster domain secret at %s", cds_file)
    GenerateHmacKey(cds_file)


def _InitGanetiServerSetup(master_name):
  """Set up the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster, generates the SSL certificate and starts the node daemon.

  @type master_name: str
  @param master_name: Name of the master node

  """
  # Generate cluster secrets
  GenerateClusterCrypto(True, False, False, False)
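  # (only new_cluster_cert is True here; the RAPI certificate, confd HMAC
  # key and cluster domain secret are created only if they don't exist yet)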

  result = utils.RunCmd([constants.DAEMON_UTIL, "start", constants.NODED])
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForNodeDaemon(master_name)


def _WaitForNodeDaemon(node_name):
  """Wait for node daemon to become responsive.

  """
  def _CheckNodeDaemon():
    result = rpc.RpcRunner.call_version([node_name])[node_name]
    if result.fail_msg:
      raise utils.RetryAgain()

  try:
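    # poll the daemon roughly every 1.0 seconds, giving up after 10.0 seconds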
    utils.Retry(_CheckNodeDaemon, 1.0, 10.0)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon on %s didn't answer queries within"
                             " 10 seconds" % node_name)


def _InitFileStorage(file_storage_dir):
  """Initialize the file storage, if needed.

  @param file_storage_dir: the user-supplied value
  @return: either empty string (if file storage was disabled at build
      time) or the normalized path to the storage directory

  """
  if not constants.ENABLE_FILE_STORAGE:
    return ""

  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory you passed is"
                               " not an absolute path.", errors.ECODE_INVAL)

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" % (file_storage_dir, err),
                                 errors.ECODE_ENVIRON)

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir,
                               errors.ECODE_ENVIRON)
  return file_storage_dir


def InitCluster(cluster_name, mac_prefix, # pylint: disable-msg=R0913
                master_netdev, file_storage_dir, candidate_pool_size,
                secondary_ip=None, vg_name=None, beparams=None,
                nicparams=None, hvparams=None, enabled_hypervisors=None,
                modify_etc_hosts=True, modify_ssh_setup=True,
                maintain_node_health=False, drbd_helper=None,
                uid_pool=None, default_iallocator=None,
                primary_ip_version=None, prealloc_wipe_disks=False):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",
                               errors.ECODE_STATE)

  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,
                               errors.ECODE_INVAL)

  ipcls = None
  if primary_ip_version == constants.IP4_VERSION:
    ipcls = netutils.IP4Address
  elif primary_ip_version == constants.IP6_VERSION:
    ipcls = netutils.IP6Address
  else:
    raise errors.OpPrereqError("Invalid primary ip version: %d." %
                               primary_ip_version)

  hostname = netutils.GetHostname(family=ipcls.family)
  if not ipcls.IsValid(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
                               " address." % (hostname.ip, primary_ip_version))

  if ipcls.IsLoopback(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
                               " address. Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not ipcls.Own(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host" %
                               hostname.ip, errors.ECODE_ENVIRON)

  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
    raise errors.OpPrereqError("Cluster IP already active",
                               errors.ECODE_NOTUNIQUE)

  if not secondary_ip:
    if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError("When using an IPv6 primary address, a valid"
                                 " IPv4 address must be given as secondary",
                                 errors.ECODE_INVAL)
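    # no secondary IP was given: default to the primary address, which at
    # this point is known to be an IPv4 address owned by this host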
    secondary_ip = hostname.ip

  if not netutils.IP4Address.IsValid(secondary_ip):
    raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
                               " IPv4 address." % secondary_ip,
                               errors.ECODE_INVAL)

  if not netutils.IP4Address.Own(secondary_ip):
    raise errors.OpPrereqError("You gave %s as secondary IP,"
                               " but it does not belong to this host." %
                               secondary_ip, errors.ECODE_ENVIRON)

  if vg_name is not None:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus,
                                 errors.ECODE_INVAL)

  if drbd_helper is not None:
    try:
      curr_helper = bdev.BaseDRBD.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      raise errors.OpPrereqError("Error while checking drbd helper"
                                 " (specify --no-drbd-storage if you are not"
                                 " using drbd): %s" % str(err),
                                 errors.ECODE_ENVIRON)
    if drbd_helper != curr_helper:
      raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s"
                                 " is the current helper" % (drbd_helper,
                                                             curr_helper),
                                 errors.ECODE_INVAL)

  file_storage_dir = _InitFileStorage(file_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()), errors.ECODE_INVAL)

  dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
  objects.NIC.CheckParameterSyntax(nicparams)

  # hvparams is a mapping of hypervisor->hvparams dict
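  # e.g. {"kvm": {...}, "xen-pvm": {...}}; each inner dict is first
  # type-checked, then validated by its hypervisor class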
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # set up ssh config and /etc/hosts
  sshline = utils.ReadFile(constants.SSH_HOST_RSA_PUB)
  sshkey = sshline.split(" ")[1]

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name, hostname.ip)

  if modify_ssh_setup:
    _InitSSHSetup()

  if default_iallocator is not None:
    alloc_script = utils.FindFile(default_iallocator,
                                  constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                 " specified" % default_iallocator,
                                 errors.ECODE_INVAL)

  now = time.time()

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    hvparams=hvparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    uid_pool=uid_pool,
    ctime=now,
    mtime=now,
    maintain_node_health=maintain_node_health,
    drbd_usermode_helper=drbd_helper,
    default_iallocator=default_iallocator,
    primary_ip_family=ipcls.family,
    prealloc_wipe_disks=prealloc_wipe_disks,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    )
  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter(offline=True)
  ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  backend.WriteSsconfFiles(cfg.GetSsconfValues())

  # set up the inter-node password and certificate
  _InitGanetiServerSetup(hostname.name)

  logging.debug("Starting daemons")
  result = utils.RunCmd([constants.DAEMON_UTIL, "start-all"])
  if result.failed:
    raise errors.OpExecError("Could not start daemons, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  """
  uuid_generator = config.TemporaryReservationManager()
  cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                _INITCONF_ECID)
  master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                    _INITCONF_ECID)
  nodes = {
    master_node_config.name: master_node_config,
    }
  default_nodegroup = objects.NodeGroup(
    uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
    name="default",
    members=[master_node_config.name],
    )
  nodegroups = {
    default_nodegroup.uuid: default_nodegroup,
    }
  now = time.time()
  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodegroups=nodegroups,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1,
                                   ctime=now, mtime=now)
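  # dump the initial config as JSON, readable by root only (mode 0600)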
  utils.WriteFile(cfg_file,
                  data=serializer.Dump(config_data.ToDict()),
                  mode=0600)


def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy.

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  cfg = config.ConfigWriter()
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
  result = rpc.RpcRunner.call_node_stop_master(master, True)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master role: %s", msg)
  result = rpc.RpcRunner.call_node_leave_cluster(master, modify_ssh_setup)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not shut down the node daemon and clean up"
                    " the node: %s", msg)


def SetupNodeDaemon(cluster_name, node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_key_check: whether to do a strict key check

  """
  family = ssconf.SimpleStore().GetPrimaryIPFamily()
  sshrunner = ssh.SshRunner(cluster_name,
                            ipv6=(family == netutils.IP6Address.family))

  noded_cert = utils.ReadFile(constants.NODED_CERT_FILE)
  rapi_cert = utils.ReadFile(constants.RAPI_CERT_FILE)
  confd_hmac_key = utils.ReadFile(constants.CONFD_HMAC_KEY)

  # in the base64 pem encoding, neither '!' nor '.' are valid chars,
  # so we use this to detect an invalid certificate; as long as the
  # cert doesn't contain this, the here-document will be correctly
  # parsed by the shell sequence below. HMAC keys are hexadecimal strings,
  # so the same restrictions apply.
  for content in (noded_cert, rapi_cert, confd_hmac_key):
    if re.search(r"^!EOF\.", content, re.MULTILINE):
      raise errors.OpExecError("invalid SSL certificate or HMAC key")

  if not noded_cert.endswith("\n"):
    noded_cert += "\n"
  if not rapi_cert.endswith("\n"):
    rapi_cert += "\n"
  if not confd_hmac_key.endswith("\n"):
    confd_hmac_key += "\n"

  bind_address = constants.IP4_ADDRESS_ANY
  if family == netutils.IP6Address.family:
    bind_address = constants.IP6_ADDRESS_ANY

  # set up inter-node password and certificate, restart the node daemon,
  # and then connect with ssh to set password and start ganeti-noded;
  # note that all the below variables are sanitized at this point,
  # either by being constants or by the checks above
  sshrunner.CopyFileToNode(node, constants.NODED_CERT_FILE)
  sshrunner.CopyFileToNode(node, constants.RAPI_CERT_FILE)
  sshrunner.CopyFileToNode(node, constants.CONFD_HMAC_KEY)
  mycommand = ("%s stop-all; %s start %s -b '%s'" % (constants.DAEMON_UTIL,
                                                     constants.DAEMON_UTIL,
                                                     constants.NODED,
                                                     bind_address))
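  # mycommand expands to roughly
  #   "daemon-util stop-all; daemon-util start ganeti-noded -b '0.0.0.0'"
  # (for an IPv4 cluster; the exact binary names come from constants)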

  result = sshrunner.Run(node, "root", mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=True,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))

  _WaitForNodeDaemon(node)


def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the current node to become
  the new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement
      (dangerous)

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_list = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ("\n".join(mc_no_master)),
                               errors.ECODE_STATE)

  if not no_voting:
    vote_list = GatherMasterVotes(node_list)

    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)
  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  try:
    # instantiate a real config writer, as we now know we have the
    # configuration data
    cfg = config.ConfigWriter(accept_foreign=True)

    cluster_info = cfg.GetClusterInfo()
    cluster_info.master_node = new_master
    # this will also regenerate the ssconf files, since we updated the
    # cluster info
    cfg.Update(cluster_info, logging.error)
  except errors.ConfigurationError, err:
    logging.error("Error while trying to set the new master: %s",
                  str(err))
    return 1

  # if cfg.Update worked, then it means the old master daemon won't be
  # able to write its own config file anymore (we rely on locking in both
  # backend.UploadFile() and ConfigWriter._Write()); hence the next
  # step is to kill the old master

  logging.info("Stopping the master daemon on node %s", old_master)

  result = rpc.RpcRunner.call_node_stop_master(old_master, True)
  msg = result.fail_msg
  if msg:
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually: %s", old_master, msg)

  logging.info("Checking master IP non-reachability...")

  master_ip = sstore.GetMasterIP()
  total_timeout = 30
  # Here we have a phase where no master should be running
  def _check_ip():
    if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
      raise utils.RetryAgain()

  try:
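    # the (1, 1.5, 5) tuple gives utils.Retry an increasing delay:
    # start at 1s, grow by a factor of 1.5 per attempt, capped at 5s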
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout)
  except utils.RetryTimeout:
    logging.warning("The master IP is still reachable after %s seconds,"
                    " continuing but activating the master on the current"
                    " node will probably fail", total_timeout)

  logging.info("Starting the master daemons on the new master")

  result = rpc.RpcRunner.call_node_start_master(new_master, True, no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)
    rcode = 1

  logging.info("Master failed over from %s to %s", old_master, new_master)
  return rcode


def GetMaster():
  """Returns the current master node.

  This is a separate function in bootstrap since it's needed by
  gnt-cluster, and instead of importing ssconf directly, it's better
  to abstract it in bootstrap, where we do use ssconf in other
  functions too.

  """
  sstore = ssconf.SimpleStore()

  old_master, _ = ssconf.GetMasterAndMyself(sstore)

  return old_master


def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since we know that (bugs aside),
  as we use the same source of configuration information for both
  backend and bootstrap, we'll always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = netutils.Hostname.GetSysName()
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (eventually after removing myself)
    return []
  results = rpc.RpcRunner.call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
  votes = {}
  for node in results:
    nres = results[node]
    data = nres.payload
    msg = nres.fail_msg
    fail = False
    if msg:
      logging.warning("Error contacting node %s: %s", node, msg)
      fail = True
    # for now we accept both length 3 and 4 (data[3] is primary ip version)
    elif not isinstance(data, (tuple, list)) or len(data) < 3:
      logging.warning("Invalid data received from node %s: %s", node, data)
      fail = True
    if fail:
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = data[2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = votes.items()
  # sort first on number of votes then on name, since we want None
  # sorted later if half of the nodes did not respond, and the other
  # half all voted for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)
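
  # e.g. [("node1.example.com", 3), (None, 2)]; the None entry aggregates
  # unreachable nodes and malformed answers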
  return vote_list