# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Functions to bootstrap a new cluster.

"""

import os
import re
import logging

from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf


def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  This generates a DSA keypair for root, adds the public key to the
  permitted hosts and adds the host key to its own known hosts.

  @param node: the name of this host as an FQDN

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)
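
  # Read back the freshly generated public key; reading at most 8192
  # bytes is more than enough for any single ssh public key.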
  f = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, f.read(8192))
  finally:
    f.close()


def _InitGanetiServerSetup():
  """Setup the necessary configuration for the initial node daemon.

  This generates the cluster-wide SSL certificate and restarts the
  node daemon so that it picks the certificate up.

  """
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("Could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  os.chmod(constants.SSL_CERT_FILE, 0400)

  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))


def InitCluster(cluster_name, mac_prefix, def_bridge,
                master_netdev, file_storage_dir, candidate_pool_size,
                secondary_ip=None, vg_name=None, beparams=None, hvparams=None,
                enabled_hypervisors=None, default_hypervisor=None):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised")

  hostname = utils.HostInfo()

  if hostname.ip.startswith("127."):
    raise errors.OpPrereqError("This host's IP resolves to the private"
                               " range (%s). Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS))

  if not utils.OwnIpAddress(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host."
                               " Aborting." % hostname.ip)

  clustername = utils.HostInfo(cluster_name)

  if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                   timeout=5):
    raise errors.OpPrereqError("Cluster IP already active. Aborting.")

  if secondary_ip:
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    if (secondary_ip != hostname.ip and
        not utils.OwnIpAddress(secondary_ip)):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip)
  else:
    secondary_ip = hostname.ip

  if vg_name is not None:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus)

  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory you passed is"
                               " not an absolute path.")

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" %
                                 (file_storage_dir, err))

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir)

  if not re.match("^[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix)
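
  # For example, "aa:00:00" passes the check above, while "AA:00:00"
  # (uppercase hex) or "aa-00-00" (wrong separator) are rejected.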

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()))

  if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
          os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
    raise errors.OpPrereqError("Init.d script '%s' missing or not"
                               " executable." % constants.NODE_INITD_SCRIPT)

  utils.CheckBEParams(beparams)

  # set up the inter-node password and certificate
  _InitGanetiServerSetup()

  # set up ssh config and /etc/hosts
  f = open(constants.SSH_HOST_RSA_PUB, 'r')
  try:
    sshline = f.read()
  finally:
    f.close()
  sshkey = sshline.split(" ")[1]

  utils.AddHostToEtcHosts(hostname.name)
  _InitSSHSetup(hostname.name)

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    default_bridge=def_bridge,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    default_hypervisor=default_hypervisor,
    beparams={constants.BEGR_DEFAULT: beparams},
    hvparams=hvparams,
    candidate_pool_size=candidate_pool_size,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False,
                                    )

  sscfg = InitConfig(constants.CONFIG_VERSION,
                     cluster_config, master_node_config)
  ssh.WriteKnownHostsFile(sscfg, constants.SSH_KNOWN_HOSTS_FILE)
  cfg = config.ConfigWriter()
  cfg.Update(cfg.GetClusterInfo())

  # start the master ip
  # TODO: Review rpc call from bootstrap
  rpc.RpcRunner.call_node_start_master(hostname.name, True)
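
# Illustrative invocation only (the values below are made up; in
# practice this function is driven by "gnt-cluster init" with
# parameters taken from the command line):
#   InitCluster("cluster.example.com", "aa:00:00", "xen-br0", "eth0",
#               "/srv/ganeti/file-storage", 10,
#               enabled_hypervisors=["xen-pvm"],
#               default_hypervisor="xen-pvm",
#               beparams={}, hvparams={"xen-pvm": {}})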


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  @rtype: L{ssconf.SimpleConfigWriter}
  @returns: initialized config instance

  """
  nodes = {
    master_node_config.name: master_node_config,
    }

  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1)
  cfg = ssconf.SimpleConfigWriter.FromDict(config_data.ToDict(), cfg_file)
  cfg.Save()

  return cfg


def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy.

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  result = rpc.RpcRunner.call_node_stop_master(master, True)
  if result.failed or not result.data:
    logging.warning("Could not disable the master role")
  result = rpc.RpcRunner.call_node_leave_cluster(master)
  if result.failed or not result.data:
    logging.warning("Could not shutdown the node daemon and cleanup the node")


def SetupNodeDaemon(cluster_name, node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_key_check: whether to do a strict key check

  """
  sshrunner = ssh.SshRunner(cluster_name)
  gntpem = utils.ReadFile(constants.SSL_CERT_FILE)
  # in the base64 pem encoding, neither '!' nor '.' are valid chars,
  # so we use this to detect an invalid certificate; as long as the
  # cert doesn't contain this, the here-document will be correctly
  # parsed by the shell sequence below
  if re.search('^!EOF\.', gntpem, re.MULTILINE):
    raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
  if not gntpem.endswith("\n"):
    raise errors.OpExecError("PEM must end with newline")

  # set up the inter-node password and certificate, restart the node
  # daemon and then connect with ssh to set the password and start
  # ganeti-noded; note that all the variables below are sanitized at
  # this point, either by being constants or by the checks above
  mycommand = ("umask 077 && "
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n%s restart" %
               (constants.SSL_CERT_FILE, gntpem,
                constants.NODE_INITD_SCRIPT))
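
  # Illustrative expansion of mycommand, assuming (hypothetically) that
  # SSL_CERT_FILE is "/var/lib/ganeti/server.pem" and NODE_INITD_SCRIPT
  # is "/etc/init.d/ganeti"; the remote shell would roughly run:
  #   umask 077 && cat > '/var/lib/ganeti/server.pem' << '!EOF.' &&
  #   <PEM data>!EOF.
  #   /etc/init.d/ganeti restart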

  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=False,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))


def MasterFailover():
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  the new master.

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_list = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ('\n'.join(mc_no_master)))

  vote_list = GatherMasterVotes(node_list)

  if vote_list:
    voted_master = vote_list[0][0]
    if voted_master is None:
      raise errors.OpPrereqError("Cluster is inconsistent, most nodes did not"
                                 " respond.")
    elif voted_master != old_master:
      raise errors.OpPrereqError("I have wrong configuration, I believe the"
                                 " master is %s but the other nodes voted for"
                                 " %s. Please resync the configuration of"
                                 " this node." % (old_master, voted_master))

  # end checks
  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  result = rpc.RpcRunner.call_node_stop_master(old_master, True)
  if result.failed or not result.data:
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually", old_master)

  # Here we have a phase where no master should be running

  # instantiate a real config writer, as we now know we have the
  # configuration data
  cfg = config.ConfigWriter()

  cluster_info = cfg.GetClusterInfo()
  cluster_info.master_node = new_master
  # this will also regenerate the ssconf files, since we updated the
  # cluster info
  cfg.Update(cluster_info)

  result = rpc.RpcRunner.call_node_start_master(new_master, True)
  if result.failed or not result.data:
    logging.error("Could not start the master role on the new master"
                  " %s, please check", new_master)
    rcode = 1

  return rcode


def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since we know that (bugs aside),
  as we use the same source of configuration information for both
  backend and bootstrap, we'll always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)
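
  Example (illustrative): if three nodes vote for node1.example.com and
  two nodes are unreachable, the result is
  [("node1.example.com", 3), (None, 2)].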

  """
  myself = utils.HostInfo().name
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (eventually after removing myself)
    return []
  results = rpc.RpcRunner.call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
  positive = negative = 0

  votes = {}
  for node in results:
    nres = results[node]
    data = nres.data
    if nres.failed or not isinstance(data, (tuple, list)) or len(data) < 3:
      # here the rpc layer should have already logged errors
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = data[2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = [v for v in votes.items()]
  # sort first on number of votes then on name, since we want None
  # sorted later if we have half of the nodes not responding, and
  # half voting all for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list