# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Functions to bootstrap a new cluster.

"""
import os
import os.path
import re
import logging

from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf

from ganeti.rpc import RpcRunner
def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  Args:
    node: the name of this host as a fqdn

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  # back up and remove any stale keypair before generating a new one
  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  # authorize the freshly generated key for root logins on this host;
  # the try/finally makes sure the file handle is not leaked on error
  f = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, f.read(8192))
  finally:
    f.close()
75 def _InitGanetiServerSetup():
76 """Setup the necessary configuration for the initial node daemon.
78 This creates the nodepass file containing the shared password for
79 the cluster and also generates the SSL certificate.
82 # Create pseudo random password
83 randpass = utils.GenerateSecret()
85 # and write it into the config file
86 utils.WriteFile(constants.CLUSTER_PASSWORD_FILE,
87 data="%s\n" % randpass, mode=0400)
89 result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
90 "-days", str(365*5), "-nodes", "-x509",
91 "-keyout", constants.SSL_CERT_FILE,
92 "-out", constants.SSL_CERT_FILE, "-batch"])
94 raise errors.OpExecError("could not generate server ssl cert, command"
95 " %s had exitcode %s and error message %s" %
96 (result.cmd, result.exit_code, result.output))
98 os.chmod(constants.SSL_CERT_FILE, 0400)
100 result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])
103 raise errors.OpExecError("Could not start the node daemon, command %s"
104 " had exitcode %s and error %s" %
105 (result.cmd, result.exit_code, result.output))
108 def InitCluster(cluster_name, hypervisor_type, mac_prefix, def_bridge,
109 master_netdev, file_storage_dir,
112 """Initialise the cluster.
115 if config.ConfigWriter.IsCluster():
116 raise errors.OpPrereqError("Cluster is already initialised")
118 if hypervisor_type == constants.HT_XEN_HVM:
119 if not os.path.exists(constants.VNC_PASSWORD_FILE):
120 raise errors.OpPrereqError("Please prepare the cluster VNC"
122 constants.VNC_PASSWORD_FILE)
124 hostname = utils.HostInfo()
126 if hostname.ip.startswith("127."):
127 raise errors.OpPrereqError("This host's IP resolves to the private"
128 " range (%s). Please fix DNS or %s." %
129 (hostname.ip, constants.ETC_HOSTS))
131 if not utils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT,
132 source=constants.LOCALHOST_IP_ADDRESS):
133 raise errors.OpPrereqError("Inconsistency: this host's name resolves"
134 " to %s,\nbut this ip address does not"
135 " belong to this host."
136 " Aborting." % hostname.ip)
138 clustername = utils.HostInfo(cluster_name)
140 if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
142 raise errors.OpPrereqError("Cluster IP already active. Aborting.")
145 if not utils.IsValidIP(secondary_ip):
146 raise errors.OpPrereqError("Invalid secondary ip given")
147 if (secondary_ip != hostname.ip and
148 (not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
149 source=constants.LOCALHOST_IP_ADDRESS))):
150 raise errors.OpPrereqError("You gave %s as secondary IP,"
151 " but it does not belong to this host." %
154 secondary_ip = hostname.ip
156 if vg_name is not None:
157 # Check if volume group is valid
158 vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
159 constants.MIN_VG_SIZE)
161 raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
162 " you are not using lvm" % vgstatus)
164 file_storage_dir = os.path.normpath(file_storage_dir)
166 if not os.path.isabs(file_storage_dir):
167 raise errors.OpPrereqError("The file storage directory you passed is"
168 " not an absolute path.")
170 if not os.path.exists(file_storage_dir):
172 os.makedirs(file_storage_dir, 0750)
174 raise errors.OpPrereqError("Cannot create file storage directory"
176 (file_storage_dir, err))
178 if not os.path.isdir(file_storage_dir):
179 raise errors.OpPrereqError("The file storage directory '%s' is not"
180 " a directory." % file_storage_dir)
182 if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
183 raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix)
185 if hypervisor_type not in constants.HYPER_TYPES:
186 raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
189 result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
191 raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
193 result.output.strip()))
195 if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
196 os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
197 raise errors.OpPrereqError("Init.d script '%s' missing or not"
198 " executable." % constants.NODE_INITD_SCRIPT)
200 # set up the inter-node password and certificate
201 _InitGanetiServerSetup()
203 # set up ssh config and /etc/hosts
204 f = open(constants.SSH_HOST_RSA_PUB, 'r')
209 sshkey = sshline.split(" ")[1]
211 utils.AddHostToEtcHosts(hostname.name)
212 _InitSSHSetup(hostname.name)
214 # init of cluster config file
215 cluster_config = objects.Cluster(
217 rsahostkeypub=sshkey,
218 highest_used_port=(constants.FIRST_DRBD_PORT - 1),
219 mac_prefix=mac_prefix,
220 volume_group_name=vg_name,
221 default_bridge=def_bridge,
222 tcpudp_port_pool=set(),
223 hypervisor=hypervisor_type,
224 master_node=hostname.name,
225 master_ip=clustername.ip,
226 master_netdev=master_netdev,
227 cluster_name=clustername.name,
228 file_storage_dir=file_storage_dir,
230 master_node_config = objects.Node(name=hostname.name,
231 primary_ip=hostname.ip,
232 secondary_ip=secondary_ip)
234 cfg = InitConfig(constants.CONFIG_VERSION,
235 cluster_config, master_node_config)
236 ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)
238 # start the master ip
239 # TODO: Review rpc call from bootstrap
240 RpcRunner.call_node_start_master(hostname.name, True)
def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: Configuration version
  @type cluster_config: objects.Cluster
  @param cluster_config: Cluster configuration
  @type master_node_config: objects.Node
  @param master_node_config: Master node configuration
  @type cfg_file: string
  @param cfg_file: Configuration file path

  @rtype: ssconf.SimpleConfigWriter
  @returns: Initialized config instance

  """
  # the master is the only node at cluster-init time
  nodes = {
    master_node_config.name: master_node_config,
    }

  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1)
  cfg = ssconf.SimpleConfigWriter.FromDict(config_data.ToDict(), cfg_file)
  # persist the initial configuration before handing it back
  cfg.Save()

  return cfg
def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  # First drop the master role, then take the node itself out of the
  # cluster; failures are logged but do not abort the teardown.
  master_stopped = RpcRunner.call_node_stop_master(master, True)
  if not master_stopped:
    logging.warning("Could not disable the master role")

  node_left = RpcRunner.call_node_leave_cluster(master)
  if not node_left:
    logging.warning("Could not shutdown the node daemon and cleanup the node")
def SetupNodeDaemon(node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  Args:
    node: fully qualified domain name for the new node
    ssh_key_check: whether to do a strict host key check on the ssh
      connection (and ask for the key interactively)

  """
  cfg = ssconf.SimpleConfigReader()
  sshrunner = ssh.SshRunner(cfg.GetClusterName())
  gntpass = utils.GetNodeDaemonPassword()
  # the password is embedded in a shell command below, so refuse
  # anything outside a strict safe alphabet
  if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
    raise errors.OpExecError("ganeti password corruption detected")
  f = open(constants.SSL_CERT_FILE)
  try:
    gntpem = f.read(8192)
  finally:
    f.close()
  # in the base64 pem encoding, neither '!' nor '.' are valid chars,
  # so we use this to detect an invalid certificate; as long as the
  # cert doesn't contain this, the here-document will be correctly
  # parsed by the shell sequence below
  if re.search('^!EOF\.', gntpem, re.MULTILINE):
    raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
  if not gntpem.endswith("\n"):
    raise errors.OpExecError("PEM must end with newline")

  # set up inter-node password and certificate and restarts the node daemon
  # and then connect with ssh to set password and start ganeti-noded
  # note that all the below variables are sanitized at this point,
  # either by being constants or by the checks above
  mycommand = ("umask 077 && "
               "echo '%s' > '%s' && "
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n%s restart" %
               (gntpass, constants.CLUSTER_PASSWORD_FILE,
                constants.SSL_CERT_FILE, gntpem,
                constants.NODE_INITD_SCRIPT))

  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=False,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))
def MasterFailover():
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  """
  cfg = ssconf.SimpleConfigWriter()

  new_master = utils.HostInfo().name
  old_master = cfg.GetMasterNode()

  if old_master == new_master:
    raise errors.OpPrereqError("This commands must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master)

  logging.info("setting master to %s, old master: %s", new_master, old_master)

  # best-effort: even if the old master cannot be reached we continue,
  # the admin is told to clean up manually
  if not RpcRunner.call_node_stop_master(old_master, True):
    logging.error("could disable the master role on the old master"
                  " %s, please disable manually", old_master)

  cfg.SetMasterNode(new_master)
  # persist the new master before distributing the configuration
  cfg.Save()

  # Here we have a phase where no master should be running

  if not RpcRunner.call_upload_file(cfg.GetNodeList(),
                                    constants.CLUSTER_CONF_FILE):
    logging.error("could not distribute the new simple store master file"
                  " to the other nodes, please check.")

  if not RpcRunner.call_node_start_master(new_master, True):
    logging.error("could not start the master role on the new master"
                  " %s, please check", new_master)