4 # Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
"""Tool to upgrade the configuration file.

This code handles only the types supported by simplejson. As an
example, 'set' is a 'list'.

"""
import os
import os.path
import sys
import optparse
import logging
import time

from cStringIO import StringIO

from ganeti import constants
from ganeti import serializer
from ganeti import utils
from ganeti import cli
from ganeti import bootstrap
from ganeti import config
from ganeti import netutils
from ganeti import pathutils
52 #: Target major version we will upgrade to
54 #: Target minor version we will upgrade to
56 #: Target major version for downgrade
58 #: Target minor version for downgrade
class Error(Exception):
  """Generic exception raised for any cfgupgrade failure."""
def SetupLogging():
  """Configures the logging module.

  Installs a stderr handler on the root logger; the handler's level is
  chosen from the parsed command-line options (debug > verbose > default
  warning).
  # NOTE(review): the def line and level-selection conditions were lost in
  # the paste; restored as debug/verbose/else — confirm against upstream.
  """
  formatter = logging.Formatter("%(asctime)s: %(message)s")

  stderr_handler = logging.StreamHandler()
  stderr_handler.setFormatter(formatter)
  if options.debug:
    stderr_handler.setLevel(logging.NOTSET)
  elif options.verbose:
    stderr_handler.setLevel(logging.INFO)
  else:
    stderr_handler.setLevel(logging.WARNING)

  # Root logger passes everything; filtering happens at the handler
  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)
  root_logger.addHandler(stderr_handler)
def CheckHostname(path):
  """Ensures hostname matches ssconf value.

  @param path: Path to ssconf file
  @return: True if the local hostname matches the master-node name stored
      in the ssconf file, False otherwise (with a warning logged)

  """
  ssconf_master_node = utils.ReadOneLineFile(path)
  hostname = netutils.GetHostname().name

  if ssconf_master_node == hostname:
    return True

  logging.warning("Warning: ssconf says master node is '%s', but this"
                  " machine's name is '%s'; this tool must be run on"
                  " the master node", ssconf_master_node, hostname)
  return False
105 def _FillIPolicySpecs(default_ipolicy, ipolicy):
106 if "minmax" in ipolicy:
107 for (key, spec) in ipolicy["minmax"][0].items():
108 for (par, val) in default_ipolicy["minmax"][0][key].items():
def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
  """Converts an instance policy to the new min/max format.

  Old-style top-level "min"/"max" keys are moved into a one-element
  "minmax" list; group policies additionally drop the "std" key (only the
  cluster keeps std values).  Missing spec parameters are then filled from
  C{default_ipolicy}.

  @param ipolicy: policy dict, modified in place
  @param default_ipolicy: policy supplying default spec values
  @param isgroup: whether this is a nodegroup (vs cluster) policy

  """
  minmax_keys = ["min", "max"]
  if any((k in ipolicy) for k in minmax_keys):
    minmax = {}
    for key in minmax_keys:
      if key in ipolicy:
        minmax[key] = ipolicy[key]
        del ipolicy[key]
    if minmax:
      ipolicy["minmax"] = [minmax]
  if isgroup and "std" in ipolicy:
    del ipolicy["std"]
  _FillIPolicySpecs(default_ipolicy, ipolicy)
def UpgradeNetworks(config_data):
  """Ensures the configuration has a 'networks' dict.

  Configurations predating network support lack the key entirely;
  initialize it to an empty dict so later code can rely on it.

  @param config_data: configuration dict, modified in place

  """
  networks = config_data.get("networks", None)
  if not networks:
    config_data["networks"] = {}
def UpgradeCluster(config_data):
  """Upgrades the cluster section of the configuration.

  Currently this only converts the cluster-level instance policy to the
  new format (see L{UpgradeIPolicy}).

  @param config_data: configuration dict, modified in place
  @raise Error: if the configuration has no "cluster" section

  """
  cluster = config_data.get("cluster", None)
  if cluster is None:
    raise Error("Cannot find cluster")
  ipolicy = cluster.setdefault("ipolicy", None)
  if ipolicy:
    UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
def UpgradeGroups(config_data):
  """Upgrades all nodegroup sections of the configuration.

  Ensures each group has a "networks" dict and converts any group-level
  instance policy to the new format, using the cluster policy for default
  values.

  @param config_data: configuration dict, modified in place
  @raise Error: if a group defines an ipolicy but the cluster has none

  """
  cl_ipolicy = config_data["cluster"].get("ipolicy")
  for group in config_data["nodegroups"].values():
    networks = group.get("networks", None)
    if not networks:
      group["networks"] = {}
    ipolicy = group.get("ipolicy", None)
    if ipolicy:
      if cl_ipolicy is None:
        raise Error("A group defines an instance policy but there is no"
                    " instance policy at cluster level")
      UpgradeIPolicy(ipolicy, cl_ipolicy, True)
def GetExclusiveStorageValue(config_data):
  """Return a conservative value of the exclusive_storage flag.

  Return C{True} if the cluster or at least a nodegroup have the flag set.

  @rtype: bool

  """
  ret = False
  cluster = config_data["cluster"]
  ndparams = cluster.get("ndparams")
  if ndparams is not None and ndparams.get("exclusive_storage"):
    ret = True
  for group in config_data["nodegroups"].values():
    ndparams = group.get("ndparams")
    if ndparams is not None and ndparams.get("exclusive_storage"):
      ret = True

  return ret
def UpgradeInstances(config_data):
  """Upgrades all instance entries in the configuration.

  Replaces NIC network references by name with the network's UUID, fixes
  inconsistent disk C{iv_name} values, and warns when disks lack the
  "spindles" parameter on a cluster that may use exclusive storage.

  @param config_data: configuration dict, modified in place
  @raise Error: if "instances" is missing or an instance has no "disks"

  """
  network2uuid = dict((n["name"], n["uuid"])
                      for n in config_data["networks"].values())
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")

  missing_spindles = False
  for instance, iobj in config_data["instances"].items():
    for nic in iobj["nics"]:
      name = nic.get("network", None)
      if name:
        uuid = network2uuid.get(name, None)
        if uuid:
          print("NIC with network name %s found."
                " Substituting with uuid %s." % (name, uuid))
          nic["network"] = uuid

    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    disks = iobj["disks"]
    for idx, dobj in enumerate(disks):
      # iv_name must follow the disk's position in the list
      expected = "disk/%s" % idx
      current = dobj.get("iv_name", "")
      if current != expected:
        logging.warning("Updating iv_name for instance %s/disk %s"
                        " from '%s' to '%s'",
                        instance, idx, current, expected)
        dobj["iv_name"] = expected
      if not "spindles" in dobj:
        missing_spindles = True

  if GetExclusiveStorageValue(config_data) and missing_spindles:
    # We cannot be sure that the instances that are missing spindles have
    # exclusive storage enabled (the check would be more complicated), so we
    # give a noncommittal message
    logging.warning("Some instance disks could be needing to update the"
                    " spindles parameter; you can check by running"
                    " 'gnt-cluster verify', and fix any problem with"
                    " 'gnt-cluster repair-disk-sizes'")
217 def UpgradeRapiUsers():
218 if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
219 not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
220 if os.path.exists(options.RAPI_USERS_FILE):
221 raise Error("Found pre-2.4 RAPI users file at %s, but another file"
222 " already exists at %s" %
223 (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
224 logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
225 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
226 if not options.dry_run:
227 utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
228 mkdir=True, mkdir_mode=0750)
230 # Create a symlink for RAPI users file
231 if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
232 os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
233 os.path.isfile(options.RAPI_USERS_FILE)):
234 logging.info("Creating symlink from %s to %s",
235 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
236 if not options.dry_run:
237 os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)
def UpgradeWatcher():
  """Removes the obsolete watcher state file, honouring dry-run."""
  # Remove old watcher state file if it exists
  if os.path.exists(options.WATCHER_STATEFILE):
    logging.info("Removing watcher state file %s", options.WATCHER_STATEFILE)
    if not options.dry_run:
      utils.RemoveFile(options.WATCHER_STATEFILE)
248 def UpgradeFileStoragePaths(config_data):
249 # Write file storage paths
250 if not os.path.exists(options.FILE_STORAGE_PATHS_FILE):
251 cluster = config_data["cluster"]
252 file_storage_dir = cluster.get("file_storage_dir")
253 shared_file_storage_dir = cluster.get("shared_file_storage_dir")
256 logging.info("Ganeti 2.7 and later only allow whitelisted directories"
257 " for file storage; writing existing configuration values"
259 options.FILE_STORAGE_PATHS_FILE)
262 logging.info("File storage directory: %s", file_storage_dir)
263 if shared_file_storage_dir:
264 logging.info("Shared file storage directory: %s",
265 shared_file_storage_dir)
268 buf.write("# List automatically generated from configuration by\n")
269 buf.write("# cfgupgrade at %s\n" % time.asctime())
271 buf.write("%s\n" % file_storage_dir)
272 if shared_file_storage_dir:
273 buf.write("%s\n" % shared_file_storage_dir)
274 utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
277 dry_run=options.dry_run,
def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
  """Maps an old node key to its value in the new key field.

  @param nodes_by_old_key: dict of node objects indexed by the old key
  @param old_key: key to translate
  @param new_key_field: field name holding the new key (e.g. "uuid")
  @return: the node's new key, or C{old_key} unchanged when the node is
      unknown (assumed already converted)

  """
  if old_key not in nodes_by_old_key:
    logging.warning("Can't find node '%s' in configuration, assuming that it's"
                    " already up-to-date", old_key)
    return old_key
  return nodes_by_old_key[old_key][new_key_field]
def ChangeNodeIndices(config_data, old_key_field, new_key_field):
  """Re-keys all node references from one field to another.

  Rewrites the "nodes" dict keys, the cluster master node, instance
  primary nodes and DRBD disk logical IDs to use C{new_key_field} (e.g.
  converting names to UUIDs for upgrade, or back for downgrade).

  @param config_data: configuration dict, modified in place
  @param old_key_field: node field currently used as key (e.g. "name")
  @param new_key_field: node field to key by instead (e.g. "uuid")

  """
  def ChangeDiskNodeIndices(disk):
    # Only DRBD disks embed node references (first two logical_id slots)
    if disk["dev_type"] in constants.LDS_DRBD:
      for i in range(0, 2):
        disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
                                                disk["logical_id"][i],
                                                new_key_field)
    if "children" in disk:
      for child in disk["children"]:
        ChangeDiskNodeIndices(child)

  nodes_by_old_key = {}
  nodes_by_new_key = {}
  for (_, node) in config_data["nodes"].items():
    nodes_by_old_key[node[old_key_field]] = node
    nodes_by_new_key[node[new_key_field]] = node

  config_data["nodes"] = nodes_by_new_key

  cluster = config_data["cluster"]
  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           cluster["master_node"],
                                           new_key_field)

  for inst in config_data["instances"].values():
    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           inst["primary_node"],
                                           new_key_field)
    for disk in inst["disks"]:
      ChangeDiskNodeIndices(disk)
def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
  """Re-keys the "instances" dict by a different instance field.

  @param config_data: configuration dict, modified in place
  @param old_key_field: field currently used as key (unused beyond intent
      symmetry with L{ChangeNodeIndices}; kept for interface stability)
  @param new_key_field: instance field to key by (e.g. "uuid" or "name")

  """
  # pylint: disable=W0613
  insts_by_new_key = {}
  for (_, inst) in config_data["instances"].items():
    insts_by_new_key[inst[new_key_field]] = inst

  config_data["instances"] = insts_by_new_key
def UpgradeNodeIndices(config_data):
  """Switches node references from names to UUIDs."""
  ChangeNodeIndices(config_data, "name", "uuid")
def UpgradeInstanceIndices(config_data):
  """Switches instance references from names to UUIDs."""
  ChangeInstanceIndices(config_data, "name", "uuid")
def UpgradeAll(config_data):
  """Runs every upgrade step and stamps the target version.

  @param config_data: configuration dict, modified in place

  """
  config_data["version"] = constants.BuildVersion(TARGET_MAJOR,
                                                  TARGET_MINOR, 0)
  UpgradeRapiUsers()
  UpgradeWatcher()
  UpgradeFileStoragePaths(config_data)
  UpgradeNetworks(config_data)
  UpgradeCluster(config_data)
  UpgradeGroups(config_data)
  UpgradeInstances(config_data)
  UpgradeNodeIndices(config_data)
  UpgradeInstanceIndices(config_data)
def DowngradeDisks(disks, owner):
  """Strips parameters unknown to the downgrade target from disks.

  @param disks: list of disk dicts, modified in place
  @param owner: instance name, used only in the log message

  """
  for disk in disks:
    # Remove spindles to downgrade to 2.8
    if "spindles" in disk:
      logging.warning("Removing spindles (value=%s) from disk %s (%s) of"
                      " instance %s",
                      disk["spindles"], disk["iv_name"], disk["uuid"], owner)
      del disk["spindles"]
def DowngradeInstances(config_data):
  """Downgrades every instance's disks.

  @param config_data: configuration dict, modified in place
  @raise Error: if "instances" is missing or an instance has no "disks"

  """
  if "instances" not in config_data:
    raise Error("Cannot find the 'instances' key in the configuration!")
  for (iname, iobj) in config_data["instances"].items():
    if "disks" not in iobj:
      raise Error("Cannot find 'disks' key for instance %s" % iname)
    DowngradeDisks(iobj["disks"], iname)
def DowngradeNodeIndices(config_data):
  """Switches node references from UUIDs back to names."""
  ChangeNodeIndices(config_data, "uuid", "name")
def DowngradeInstanceIndices(config_data):
  """Switches instance references from UUIDs back to names."""
  ChangeInstanceIndices(config_data, "uuid", "name")
def DowngradeAll(config_data):
  """Runs every downgrade step and stamps the downgrade version.

  @param config_data: configuration dict, modified in place

  """
  # Any code specific to a particular version should be labeled that way, so
  # it can be removed when updating to the next version.
  config_data["version"] = constants.BuildVersion(DOWNGRADE_MAJOR,
                                                  DOWNGRADE_MINOR, 0)
  DowngradeInstances(config_data)
  DowngradeNodeIndices(config_data)
  DowngradeInstanceIndices(config_data)
394 global options, args # pylint: disable=W0603
397 parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
398 parser.add_option("--dry-run", dest="dry_run",
400 help="Try to do the conversion, but don't write"
402 parser.add_option(cli.FORCE_OPT)
403 parser.add_option(cli.DEBUG_OPT)
404 parser.add_option(cli.VERBOSE_OPT)
405 parser.add_option("--ignore-hostname", dest="ignore_hostname",
406 action="store_true", default=False,
407 help="Don't abort if hostname doesn't match")
408 parser.add_option("--path", help="Convert configuration in this"
409 " directory instead of '%s'" % pathutils.DATA_DIR,
410 default=pathutils.DATA_DIR, dest="data_dir")
411 parser.add_option("--confdir",
412 help=("Use this directory instead of '%s'" %
414 default=pathutils.CONF_DIR, dest="conf_dir")
415 parser.add_option("--no-verify",
416 help="Do not verify configuration after upgrade",
417 action="store_true", dest="no_verify", default=False)
418 parser.add_option("--downgrade",
419 help="Downgrade to the previous stable version",
420 action="store_true", dest="downgrade", default=False)
421 (options, args) = parser.parse_args()
423 # We need to keep filenames locally because they might be renamed between
425 options.data_dir = os.path.abspath(options.data_dir)
426 options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
427 options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
428 options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
429 options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
430 options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
431 options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
432 options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
433 options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
434 options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
435 options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
436 options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
437 options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
438 options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"
444 raise Error("No arguments expected")
445 if options.downgrade and not options.no_verify:
446 options.no_verify = True
449 if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
450 logging.error("Aborting due to hostname mismatch")
451 sys.exit(constants.EXIT_FAILURE)
453 if not options.force:
454 if options.downgrade:
455 usertext = ("The configuration is going to be DOWNGRADED to version %s.%s"
456 " Some configuration data might be removed if they don't fit"
457 " in the old format. Please make sure you have read the"
458 " upgrade notes (available in the UPGRADE file and included"
459 " in other documentation formats) to understand what they"
460 " are. Continue with *DOWNGRADING* the configuration?" %
461 (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
463 usertext = ("Please make sure you have read the upgrade notes for"
464 " Ganeti %s (available in the UPGRADE file and included"
465 " in other documentation formats). Continue with upgrading"
466 " configuration?" % constants.RELEASE_VERSION)
467 if not cli.AskUser(usertext):
468 sys.exit(constants.EXIT_FAILURE)
470 # Check whether it's a Ganeti configuration directory
471 if not (os.path.isfile(options.CONFIG_DATA_PATH) and
472 os.path.isfile(options.SERVER_PEM_PATH) and
473 os.path.isfile(options.KNOWN_HOSTS_PATH)):
474 raise Error(("%s does not seem to be a Ganeti configuration"
475 " directory") % options.data_dir)
477 if not os.path.isdir(options.conf_dir):
478 raise Error("Not a directory: %s" % options.conf_dir)
480 config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))
483 config_version = config_data["version"]
485 raise Error("Unable to determine configuration version")
487 (config_major, config_minor, config_revision) = \
488 constants.SplitVersion(config_version)
490 logging.info("Found configuration version %s (%d.%d.%d)",
491 config_version, config_major, config_minor, config_revision)
493 if "config_version" in config_data["cluster"]:
494 raise Error("Inconsistent configuration: found config_version in"
495 " configuration file")
497 # Downgrade to the previous stable version
498 if options.downgrade:
499 if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
500 (config_major == DOWNGRADE_MAJOR and
501 config_minor == DOWNGRADE_MINOR)):
502 raise Error("Downgrade supported only from the latest version (%s.%s),"
503 " found %s (%s.%s.%s) instead" %
504 (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
505 config_minor, config_revision))
506 DowngradeAll(config_data)
508 # Upgrade from 2.{0..7} to 2.9
509 elif config_major == 2 and config_minor in range(0, 10):
510 if config_revision != 0:
511 logging.warning("Config revision is %s, not 0", config_revision)
512 UpgradeAll(config_data)
514 elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
515 logging.info("No changes necessary")
518 raise Error("Configuration version %d.%d.%d not supported by this tool" %
519 (config_major, config_minor, config_revision))
522 logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
523 utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
524 data=serializer.DumpJson(config_data),
526 dry_run=options.dry_run,
529 if not options.dry_run:
530 bootstrap.GenerateClusterCrypto(
531 False, False, False, False, False,
532 nodecert_file=options.SERVER_PEM_PATH,
533 rapicert_file=options.RAPI_CERT_FILE,
534 spicecert_file=options.SPICE_CERT_FILE,
535 spicecacert_file=options.SPICE_CACERT_FILE,
536 hmackey_file=options.CONFD_HMAC_KEY,
537 cds_file=options.CDS_FILE)
540 logging.critical("Writing configuration failed. It is probably in an"
541 " inconsistent state and needs manual intervention.")
544 # test loading the config file
546 if not (options.dry_run or options.no_verify):
547 logging.info("Testing the new config file...")
548 cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
549 accept_foreign=options.ignore_hostname,
551 # if we reached this, it's all fine
552 vrfy = cfg.VerifyConfig()
554 logging.error("Errors after conversion:")
556 logging.error(" - %s", item)
559 logging.info("File loaded successfully after upgrading")
562 if options.downgrade:
563 action = "downgraded"
564 out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
567 out_ver = constants.RELEASE_VERSION
569 cli.ToStderr("Configuration successfully %s to version %s.",
572 cli.ToStderr("Configuration %s to version %s, but there are errors."
573 "\nPlease review the file.", action, out_ver)
if __name__ == "__main__":
  main()