# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
22 """Functions to bootstrap a new cluster.
32 from ganeti import rpc
33 from ganeti import ssh
34 from ganeti import utils
35 from ganeti import errors
36 from ganeti import config
37 from ganeti import constants
38 from ganeti import objects
39 from ganeti import ssconf


def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the public key to the
  permitted hosts and adds the hostkey to its own known hosts.

  Args:
    node: the name of this host as a fqdn

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  f = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, f.read(8192))
  finally:
    f.close()
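
# For reference, the function above is roughly equivalent to this shell
# sequence (paths are illustrative, assuming the standard root-user SSH
# files returned by ssh.GetUserFiles):
#   ssh-keygen -t dsa -f /root/.ssh/id_dsa -q -N ""
#   cat /root/.ssh/id_dsa.pub >> /root/.ssh/authorized_keys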


def _InitGanetiServerSetup():
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  """
  # Create pseudo random password
  randpass = utils.GenerateSecret()

  # and write it into the config file
  utils.WriteFile(constants.CLUSTER_PASSWORD_FILE,
                  data="%s\n" % randpass, mode=0400)

  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  os.chmod(constants.SSL_CERT_FILE, 0400)

  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))
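
# Note: the openssl invocation above writes both the RSA private key and
# the self-signed certificate (valid for five years, 365*5 days) into the
# same file, constants.SSL_CERT_FILE, which is why -keyout and -out point
# at the same path.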


def InitCluster(cluster_name, mac_prefix, def_bridge,
                master_netdev, file_storage_dir,
                secondary_ip=None,
                vg_name=None, beparams=None, hvparams=None,
                enabled_hypervisors=None, default_hypervisor=None):
  """Initialise the cluster.

  """
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised")

  hostname = utils.HostInfo()

  if hostname.ip.startswith("127."):
    raise errors.OpPrereqError("This host's IP resolves to the private"
                               " range (%s). Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS))

  if not utils.OwnIpAddress(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host."
                               " Aborting." % hostname.ip)

  clustername = utils.HostInfo(cluster_name)

  if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                   timeout=5):
    raise errors.OpPrereqError("Cluster IP already active. Aborting.")

  if secondary_ip:
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    if (secondary_ip != hostname.ip and
        not utils.OwnIpAddress(secondary_ip)):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip)
  else:
    secondary_ip = hostname.ip

  if vg_name is not None:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus)

  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory you passed is"
                               " not an absolute path.")

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" %
                                 (file_storage_dir, err))

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix)

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()))

  if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
          os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
    raise errors.OpPrereqError("Init.d script '%s' missing or not"
                               " executable." % constants.NODE_INITD_SCRIPT)

  # set up the inter-node password and certificate
  _InitGanetiServerSetup()

  # set up ssh config and /etc/hosts
  f = open(constants.SSH_HOST_RSA_PUB, 'r')
  try:
    sshline = f.read()
  finally:
    f.close()
  sshkey = sshline.split(" ")[1]

  utils.AddHostToEtcHosts(hostname.name)
  _InitSSHSetup(hostname.name)

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    default_bridge=def_bridge,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    default_hypervisor=default_hypervisor,
    beparams={constants.BEGR_DEFAULT: beparams},
    hvparams=hvparams,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip)

  cfg = InitConfig(constants.CONFIG_VERSION,
                   cluster_config, master_node_config)
  ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)

  # start the master ip
  # TODO: Review rpc call from bootstrap
  rpc.RpcRunner.call_node_start_master(hostname.name, True)
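
# Illustrative invocation (all values are hypothetical; in practice
# InitCluster is driven by the `gnt-cluster init` command-line tool):
#   InitCluster("cluster.example.com", "aa:00:00", "xen-br0", "xen-br0",
#               "/srv/ganeti/file-storage",
#               beparams={}, hvparams={},
#               enabled_hypervisors=["xen-pvm"],
#               default_hypervisor="xen-pvm")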


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: Configuration version
  @type cluster_config: objects.Cluster
  @param cluster_config: Cluster configuration
  @type master_node_config: objects.Node
  @param master_node_config: Master node configuration
  @type cfg_file: string
  @param cfg_file: Configuration file path

  @rtype: ssconf.SimpleConfigWriter
  @returns: Initialized config instance

  """
  nodes = {
    master_node_config.name: master_node_config,
    }

  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1)
  cfg = ssconf.SimpleConfigWriter.FromDict(config_data.ToDict(), cfg_file)
  cfg.Save()

  return cfg


def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy.

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  if not rpc.RpcRunner.call_node_stop_master(master, True):
    logging.warning("Could not disable the master role")
  if not rpc.RpcRunner.call_node_leave_cluster(master):
    logging.warning("Could not shutdown the node daemon and cleanup the node")


def SetupNodeDaemon(node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  Args:
    node: fully qualified domain name for the new node

  """
  cfg = ssconf.SimpleConfigReader()
  sshrunner = ssh.SshRunner(cfg.GetClusterName())
  gntpass = utils.GetNodeDaemonPassword()
  if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
    raise errors.OpExecError("ganeti password corruption detected")
  gntpem = utils.ReadFile(constants.SSL_CERT_FILE)
  # in the base64 pem encoding, neither '!' nor '.' are valid chars,
  # so we use this to detect an invalid certificate; as long as the
  # cert doesn't contain this, the here-document will be correctly
  # parsed by the shell sequence below
  if re.search(r'^!EOF\.', gntpem, re.MULTILINE):
    raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
  if not gntpem.endswith("\n"):
    raise errors.OpExecError("PEM must end with newline")

  # set up the inter-node password and certificate and restart the node
  # daemon: connect with ssh to write the password, install the certificate
  # and start ganeti-noded; note that all the variables below are sanitized
  # at this point, either by being constants or by the checks above
  mycommand = ("umask 077 && "
               "echo '%s' > '%s' && "
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n%s restart" %
               (gntpass, constants.CLUSTER_PASSWORD_FILE,
                constants.SSL_CERT_FILE, gntpem,
                constants.NODE_INITD_SCRIPT))
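
  # For illustration, with hypothetical values the command sent to the
  # remote shell expands to roughly:
  #   umask 077 &&
  #   echo 'SECRET' > '/var/lib/ganeti/password' &&
  #   cat > '/var/lib/ganeti/server.pem' << '!EOF.' &&
  #   -----BEGIN CERTIFICATE----- ... -----END CERTIFICATE-----
  #   !EOF.
  #   /etc/init.d/ganeti restart
  # (the actual paths come from constants.py and may differ)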

  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=False,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))
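
# Illustrative usage (hypothetical node name; normally reached from the
# node-add code path before the opcode itself runs):
#   SetupNodeDaemon("node2.example.com", ssh_key_check=True)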


def MasterFailover():
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  the new master.

  """
  cfg = ssconf.SimpleConfigWriter()

  new_master = utils.HostInfo().name
  old_master = cfg.GetMasterNode()
  node_list = cfg.GetNodeList()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master)

  vote_list = GatherMasterVotes(node_list)

  if vote_list:
    voted_master = vote_list[0][0]
    if voted_master is None:
      raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                 " not respond.")
    elif voted_master != old_master:
      raise errors.OpPrereqError("I have wrong configuration, I believe the"
                                 " master is %s but the other nodes voted for"
                                 " %s. Please resync the configuration of"
                                 " this node." % (old_master, voted_master))

  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  if not rpc.RpcRunner.call_node_stop_master(old_master, True):
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually", old_master)

  cfg.SetMasterNode(new_master)
  cfg.Save()

  # Here we have a phase where no master should be running

  if not rpc.RpcRunner.call_upload_file(cfg.GetNodeList(),
                                        constants.CLUSTER_CONF_FILE):
    logging.error("Could not distribute the new configuration"
                  " to the other nodes, please check.")

  if not rpc.RpcRunner.call_node_start_master(new_master, True):
    logging.error("Could not start the master role on the new master"
                  " %s, please check", new_master)
    rcode = 1

  return rcode


def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since we know that (bugs aside),
  because we use the same source of configuration information for both
  backend and bootstrap, we'll always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
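  # Example result (illustrative): on a five-node cluster where this node
  # is in node_list, one remote node is unreachable and the other three
  # agree, the result would look like [("master.example.com", 3), (None, 1)].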
  myself = utils.HostInfo().name
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (eventually after removing myself)
    return []
  results = rpc.RpcRunner.call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
  votes = {}
  for node in results:
    if not isinstance(results[node], (tuple, list)) or len(results[node]) < 3:
      # here the rpc layer should have already logged errors
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = results[node][2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = list(votes.items())
  # sort first on number of votes then on name, since we want None
  # sorted later if half of the nodes do not respond and the other
  # half all vote for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list