# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
22 """Functions to bootstrap a new cluster.
from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf


def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the public key to the
  permitted hosts and adds the hostkey to its own known hosts.

  Args:
    node: the name of this host as a fqdn

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  f = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, f.read(8192))
  finally:
    f.close()
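
# Editorial note (not part of the original module): the ssh-keygen call
# above is the equivalent of running
#
#   ssh-keygen -t dsa -f <priv_key> -q -N ""
#
# i.e. generating a passphrase-less DSA keypair without prompting.  On a
# typical setup ssh.GetUserFiles() would return paths such as root's
# ~/.ssh/id_dsa, ~/.ssh/id_dsa.pub and ~/.ssh/authorized_keys, but the
# exact locations are an assumption here and depend on GANETI_RUNAS.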


def _InitGanetiServerSetup():
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  """
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("Could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  os.chmod(constants.SSL_CERT_FILE, 0400)

  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))
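
# Editorial note (not part of the original module): the certificate
# generation above is equivalent to the shell command (with the path
# substituted from constants.SSL_CERT_FILE)
#
#   openssl req -new -newkey rsa:1024 -days 1825 -nodes -x509 \
#     -keyout $SSL_CERT_FILE -out $SSL_CERT_FILE -batch
#
# i.e. a self-signed RSA certificate valid for five years, with key and
# certificate in the same file, afterwards restricted to mode 0400.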


def InitCluster(cluster_name, mac_prefix, def_bridge,
                master_netdev, file_storage_dir,
                secondary_ip=None,
                vg_name=None, beparams=None, hvparams=None,
                enabled_hypervisors=None, default_hypervisor=None):
  """Initialise the cluster.

  This runs the prerequisite checks, sets up the node daemon, the SSH
  configuration and the initial cluster configuration, and starts the
  master role on the current node.

  """
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised")

  hostname = utils.HostInfo()

  if hostname.ip.startswith("127."):
    raise errors.OpPrereqError("This host's IP resolves to the private"
                               " range (%s). Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS))

  if not utils.OwnIpAddress(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host."
                               " Aborting." % hostname.ip)

  clustername = utils.HostInfo(cluster_name)

  if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                   timeout=5):
    raise errors.OpPrereqError("Cluster IP already active. Aborting.")

  if secondary_ip:
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    if (secondary_ip != hostname.ip and
        not utils.OwnIpAddress(secondary_ip)):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip)
  else:
    secondary_ip = hostname.ip

  if vg_name is not None:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus)

  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory you passed is"
                               " not an absolute path.")

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" %
                                 (file_storage_dir, err))

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir)

  # mac prefixes are hex bytes, so restrict the match to hex digits
  if not re.match("^[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix)

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()))

  if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
          os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
    raise errors.OpPrereqError("Init.d script '%s' missing or not"
                               " executable." % constants.NODE_INITD_SCRIPT)

  # set up the inter-node password and certificate
  _InitGanetiServerSetup()

  # set up ssh config and /etc/hosts
  f = open(constants.SSH_HOST_RSA_PUB, 'r')
  try:
    sshline = f.read()
  finally:
    f.close()
  sshkey = sshline.split(" ")[1]

  utils.AddHostToEtcHosts(hostname.name)
  _InitSSHSetup(hostname.name)

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    default_bridge=def_bridge,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    default_hypervisor=default_hypervisor,
    beparams={constants.BEGR_DEFAULT: beparams},
    hvparams=hvparams,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip)

  cfg = InitConfig(constants.CONFIG_VERSION,
                   cluster_config, master_node_config)
  ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)

  # start the master ip
  # TODO: Review rpc call from bootstrap
  rpc.RpcRunner.call_node_start_master(hostname.name, True)
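
# Editorial sketch (not part of the original module): how a front-end
# such as the cluster-init command-line script might call InitCluster().
# All argument values below are hypothetical examples.
#
#   InitCluster(cluster_name="cluster.example.com",
#               mac_prefix="aa:00:00",
#               def_bridge="xen-br0",
#               master_netdev="eth0",
#               file_storage_dir="/srv/ganeti/file-storage",
#               secondary_ip=None,
#               vg_name="xenvg",
#               beparams={},
#               hvparams={},
#               enabled_hypervisors=["xen-pvm"],
#               default_hypervisor="xen-pvm")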


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: Configuration version
  @type cluster_config: objects.Cluster
  @param cluster_config: Cluster configuration
  @type master_node_config: objects.Node
  @param master_node_config: Master node configuration
  @type cfg_file: string
  @param cfg_file: Configuration file path

  @rtype: ssconf.SimpleConfigWriter
  @return: Initialized config instance

  """
  nodes = {
    master_node_config.name: master_node_config,
    }

  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1)
  cfg = ssconf.SimpleConfigWriter.FromDict(config_data.ToDict(), cfg_file)
  cfg.Save()

  return cfg
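
# Editorial sketch (not part of the original module): conceptually the
# configuration written above corresponds to a structure like
#
#   {"version": version,
#    "cluster": <cluster_config as serialized by ToDict()>,
#    "nodes": {master_node_config.name: <serialized node>},
#    "instances": {}}
#
# i.e. exactly one node (the master) and no instances; the concrete key
# names are whatever objects.ConfigData.ToDict() produces.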


def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy.

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  if not rpc.RpcRunner.call_node_stop_master(master, True):
    logging.warning("Could not disable the master role")
  if not rpc.RpcRunner.call_node_leave_cluster(master):
    logging.warning("Could not shutdown the node daemon and cleanup the node")


def SetupNodeDaemon(node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  Args:
    node: fully qualified domain name for the new node
    ssh_key_check: whether to do a strict key check

  """
  cfg = ssconf.SimpleConfigReader()
  sshrunner = ssh.SshRunner(cfg.GetClusterName())
  gntpem = utils.ReadFile(constants.SSL_CERT_FILE)
  # in the base64 pem encoding, neither '!' nor '.' is a valid
  # character, so we use this to detect an invalid certificate; as long
  # as the cert doesn't contain this, the here-document will be
  # correctly parsed by the shell sequence below
  if re.search(r'^!EOF\.', gntpem, re.MULTILINE):
    raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
  if not gntpem.endswith("\n"):
    raise errors.OpExecError("PEM must end with newline")

  # set up the inter-node password and certificate and restart the node
  # daemon: connect with ssh to write the certificate and start
  # ganeti-noded.  Note that all the variables below are sanitized at
  # this point, either by being constants or by the checks above.
  mycommand = ("umask 077 && "
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n%s restart" %
               (constants.SSL_CERT_FILE, gntpem,
                constants.NODE_INITD_SCRIPT))

  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=False,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))
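
# Editorial sketch (not part of the original module): with hypothetical
# paths for the two constants, the command built above expands to a
# shell snippet of the form
#
#   umask 077 && cat > '/var/lib/ganeti/server.pem' << '!EOF.' &&
#   -----BEGIN CERTIFICATE-----
#   ...
#   -----END CERTIFICATE-----
#   !EOF.
#   /etc/init.d/ganeti restart
#
# which is what the '!EOF.' sentinel check above protects: the transfer
# could only break if the PEM payload itself contained a line starting
# with "!EOF.".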


def MasterFailover():
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  the new master.

  """
  cfg = ssconf.SimpleConfigWriter()

  new_master = utils.HostInfo().name
  old_master = cfg.GetMasterNode()
  node_list = cfg.GetNodeList()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master)

  vote_list = GatherMasterVotes(node_list)

  if vote_list:
    voted_master = vote_list[0][0]
    if voted_master is None:
      raise errors.OpPrereqError("Cluster is inconsistent, most nodes did not"
                                 " respond.")
    elif voted_master != old_master:
      raise errors.OpPrereqError("This node's configuration is wrong: it"
                                 " believes the master is %s but the other"
                                 " nodes voted for %s. Please resync the"
                                 " configuration of this node." %
                                 (old_master, voted_master))
  # end checks

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  if not rpc.RpcRunner.call_node_stop_master(old_master, True):
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually", old_master)

  cfg.SetMasterNode(new_master)
  cfg.Save()

  # Here we have a phase where no master should be running

  if not rpc.RpcRunner.call_upload_file(cfg.GetNodeList(),
                                        constants.CLUSTER_CONF_FILE):
    logging.error("Could not distribute the new configuration"
                  " to the other nodes, please check.")

  if not rpc.RpcRunner.call_node_start_master(new_master, True):
    logging.error("Could not start the master role on the new master"
                  " %s, please check", new_master)


def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since we know that (bugs aside),
  as we use the same source of configuration information for both
  backend and bootstrap, we'll always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = utils.HostInfo().name
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (eventually after removing myself)
    return []
  results = rpc.RpcRunner.call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
  votes = {}
  for node in results:
    if not isinstance(results[node], (tuple, list)) or len(results[node]) < 3:
      # here the rpc layer should have already logged errors
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = results[node][2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = votes.items()
  # sort first on number of votes then on name, since we want None
  # sorted later if we have half of the nodes not responding, and
  # half voting all for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list
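

# Editorial sketch (not part of the original module): a minimal, runnable
# illustration of the sort used by GatherMasterVotes(), with hypothetical
# node names.  It shows why, under Python 2 ordering (None sorts before
# any string), failed nodes keyed by None end up after real nodes that
# have the same number of votes.
def _ExampleSortVotes():
  votes = {"node1.example.com": 2, None: 2, "node2.example.com": 1}
  vote_list = votes.items()
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)
  # vote_list is now:
  #   [('node1.example.com', 2), (None, 2), ('node2.example.com', 1)]
  return vote_list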