#!/usr/bin/python # # Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. """Tool to upgrade the configuration file. This code handles only the types supported by simplejson. As an example, 'set' is a 'list'. """ import os import os.path import sys import optparse import logging import time from cStringIO import StringIO from ganeti import constants from ganeti import serializer from ganeti import utils from ganeti import cli from ganeti import bootstrap from ganeti import config from ganeti import netutils from ganeti import pathutils options = None args = None #: Target major version we will upgrade to TARGET_MAJOR = 2 #: Target minor version we will upgrade to TARGET_MINOR = 9 #: Target major version for downgrade DOWNGRADE_MAJOR = 2 #: Target minor version for downgrade DOWNGRADE_MINOR = 8 class Error(Exception): """Generic exception""" pass def SetupLogging(): """Configures the logging module. """ formatter = logging.Formatter("%(asctime)s: %(message)s") stderr_handler = logging.StreamHandler() stderr_handler.setFormatter(formatter) if options.debug: stderr_handler.setLevel(logging.NOTSET) elif options.verbose: stderr_handler.setLevel(logging.INFO) else: stderr_handler.setLevel(logging.WARNING) root_logger = logging.getLogger("") root_logger.setLevel(logging.NOTSET) root_logger.addHandler(stderr_handler) def CheckHostname(path): """Ensures hostname matches ssconf value. @param path: Path to ssconf file """ ssconf_master_node = utils.ReadOneLineFile(path) hostname = netutils.GetHostname().name if ssconf_master_node == hostname: return True logging.warning("Warning: ssconf says master node is '%s', but this" " machine's name is '%s'; this tool must be run on" " the master node", ssconf_master_node, hostname) return False def _FillIPolicySpecs(default_ipolicy, ipolicy): if "minmax" in ipolicy: for (key, spec) in ipolicy["minmax"][0].items(): for (par, val) in default_ipolicy["minmax"][0][key].items(): if par not in spec: spec[par] = val def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup): minmax_keys = ["min", "max"] if any((k in ipolicy) for k in minmax_keys): minmax = {} for key in minmax_keys: if key in ipolicy: if ipolicy[key]: minmax[key] = ipolicy[key] del ipolicy[key] if minmax: ipolicy["minmax"] = [minmax] if isgroup and "std" in ipolicy: del ipolicy["std"] _FillIPolicySpecs(default_ipolicy, ipolicy) def UpgradeNetworks(config_data): networks = config_data.get("networks", None) if not networks: config_data["networks"] = {} def UpgradeCluster(config_data): cluster = config_data.get("cluster", None) if cluster is None: raise Error("Cannot find cluster") ipolicy = cluster.setdefault("ipolicy", None) if ipolicy: UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False) def UpgradeGroups(config_data): cl_ipolicy = config_data["cluster"].get("ipolicy") for group in config_data["nodegroups"].values(): networks = group.get("networks", None) if not networks: group["networks"] = {} ipolicy = group.get("ipolicy", None) if ipolicy: if cl_ipolicy is None: raise Error("A group defines an instance policy but there is no" " instance policy at cluster level") UpgradeIPolicy(ipolicy, cl_ipolicy, True) def GetExclusiveStorageValue(config_data): """Return a conservative value of the exclusive_storage flag. Return C{True} if the cluster or at least a nodegroup have the flag set. """ ret = False cluster = config_data["cluster"] ndparams = cluster.get("ndparams") if ndparams is not None and ndparams.get("exclusive_storage"): ret = True for group in config_data["nodegroups"].values(): ndparams = group.get("ndparams") if ndparams is not None and ndparams.get("exclusive_storage"): ret = True return ret def UpgradeInstances(config_data): network2uuid = dict((n["name"], n["uuid"]) for n in config_data["networks"].values()) if "instances" not in config_data: raise Error("Can't find the 'instances' key in the configuration!") missing_spindles = False for instance, iobj in config_data["instances"].items(): for nic in iobj["nics"]: name = nic.get("network", None) if name: uuid = network2uuid.get(name, None) if uuid: print("NIC with network name %s found." " Substituting with uuid %s." % (name, uuid)) nic["network"] = uuid if "disks" not in iobj: raise Error("Instance '%s' doesn't have a disks entry?!" % instance) disks = iobj["disks"] for idx, dobj in enumerate(disks): expected = "disk/%s" % idx current = dobj.get("iv_name", "") if current != expected: logging.warning("Updating iv_name for instance %s/disk %s" " from '%s' to '%s'", instance, idx, current, expected) dobj["iv_name"] = expected if not "spindles" in dobj: missing_spindles = True if GetExclusiveStorageValue(config_data) and missing_spindles: # We cannot be sure that the instances that are missing spindles have # exclusive storage enabled (the check would be more complicated), so we # give a noncommittal message logging.warning("Some instance disks could be needing to update the" " spindles parameter; you can check by running" " 'gnt-cluster verify', and fix any problem with" " 'gnt-cluster repair-disk-sizes'") def UpgradeRapiUsers(): if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and not os.path.islink(options.RAPI_USERS_FILE_PRE24)): if os.path.exists(options.RAPI_USERS_FILE): raise Error("Found pre-2.4 RAPI users file at %s, but another file" " already exists at %s" % (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)) logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s", options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE) if not options.dry_run: utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE, mkdir=True, mkdir_mode=0750) # Create a symlink for RAPI users file if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and os.path.isfile(options.RAPI_USERS_FILE)): logging.info("Creating symlink from %s to %s", options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE) if not options.dry_run: os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24) def UpgradeWatcher(): # Remove old watcher state file if it exists if os.path.exists(options.WATCHER_STATEFILE): logging.info("Removing watcher state file %s", options.WATCHER_STATEFILE) if not options.dry_run: utils.RemoveFile(options.WATCHER_STATEFILE) def UpgradeFileStoragePaths(config_data): # Write file storage paths if not os.path.exists(options.FILE_STORAGE_PATHS_FILE): cluster = config_data["cluster"] file_storage_dir = cluster.get("file_storage_dir") shared_file_storage_dir = cluster.get("shared_file_storage_dir") del cluster logging.info("Ganeti 2.7 and later only allow whitelisted directories" " for file storage; writing existing configuration values" " into '%s'", options.FILE_STORAGE_PATHS_FILE) if file_storage_dir: logging.info("File storage directory: %s", file_storage_dir) if shared_file_storage_dir: logging.info("Shared file storage directory: %s", shared_file_storage_dir) buf = StringIO() buf.write("# List automatically generated from configuration by\n") buf.write("# cfgupgrade at %s\n" % time.asctime()) if file_storage_dir: buf.write("%s\n" % file_storage_dir) if shared_file_storage_dir: buf.write("%s\n" % shared_file_storage_dir) utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE, data=buf.getvalue(), mode=0600, dry_run=options.dry_run, backup=True) def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field): if old_key not in nodes_by_old_key: logging.warning("Can't find node '%s' in configuration, assuming that it's" " already up-to-date", old_key) return old_key return nodes_by_old_key[old_key][new_key_field] def ChangeNodeIndices(config_data, old_key_field, new_key_field): def ChangeDiskNodeIndices(disk): if disk["dev_type"] in constants.LDS_DRBD: for i in range(0, 2): disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key, disk["logical_id"][i], new_key_field) if "children" in disk: for child in disk["children"]: ChangeDiskNodeIndices(child) nodes_by_old_key = {} nodes_by_new_key = {} for (_, node) in config_data["nodes"].items(): nodes_by_old_key[node[old_key_field]] = node nodes_by_new_key[node[new_key_field]] = node config_data["nodes"] = nodes_by_new_key cluster = config_data["cluster"] cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key, cluster["master_node"], new_key_field) for inst in config_data["instances"].values(): inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key, inst["primary_node"], new_key_field) for disk in inst["disks"]: ChangeDiskNodeIndices(disk) def ChangeInstanceIndices(config_data, old_key_field, new_key_field): insts_by_old_key = {} insts_by_new_key = {} for (_, inst) in config_data["instances"].items(): insts_by_old_key[inst[old_key_field]] = inst insts_by_new_key[inst[new_key_field]] = inst config_data["instances"] = insts_by_new_key def UpgradeNodeIndices(config_data): ChangeNodeIndices(config_data, "name", "uuid") def UpgradeInstanceIndices(config_data): ChangeInstanceIndices(config_data, "name", "uuid") def UpgradeAll(config_data): config_data["version"] = constants.BuildVersion(TARGET_MAJOR, TARGET_MINOR, 0) UpgradeRapiUsers() UpgradeWatcher() UpgradeFileStoragePaths(config_data) UpgradeNetworks(config_data) UpgradeCluster(config_data) UpgradeGroups(config_data) UpgradeInstances(config_data) UpgradeNodeIndices(config_data) UpgradeInstanceIndices(config_data) def DowngradeDisks(disks, owner): for disk in disks: # Remove spindles to downgrade to 2.8 if "spindles" in disk: logging.warning("Removing spindles (value=%s) from disk %s (%s) of" " instance %s", disk["spindles"], disk["iv_name"], disk["uuid"], owner) del disk["spindles"] def DowngradeInstances(config_data): if "instances" not in config_data: raise Error("Cannot find the 'instances' key in the configuration!") for (iname, iobj) in config_data["instances"].items(): if "disks" not in iobj: raise Error("Cannot find 'disks' key for instance %s" % iname) DowngradeDisks(iobj["disks"], iname) def DowngradeNodeIndices(config_data): ChangeNodeIndices(config_data, "uuid", "name") def DowngradeInstanceIndices(config_data): ChangeInstanceIndices(config_data, "uuid", "name") def DowngradeAll(config_data): # Any code specific to a particular version should be labeled that way, so # it can be removed when updating to the next version. config_data["version"] = constants.BuildVersion(DOWNGRADE_MAJOR, DOWNGRADE_MINOR, 0) DowngradeInstances(config_data) DowngradeNodeIndices(config_data) DowngradeInstanceIndices(config_data) def main(): """Main program. """ global options, args # pylint: disable=W0603 # Option parsing parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]") parser.add_option("--dry-run", dest="dry_run", action="store_true", help="Try to do the conversion, but don't write" " output file") parser.add_option(cli.FORCE_OPT) parser.add_option(cli.DEBUG_OPT) parser.add_option(cli.VERBOSE_OPT) parser.add_option("--ignore-hostname", dest="ignore_hostname", action="store_true", default=False, help="Don't abort if hostname doesn't match") parser.add_option("--path", help="Convert configuration in this" " directory instead of '%s'" % pathutils.DATA_DIR, default=pathutils.DATA_DIR, dest="data_dir") parser.add_option("--confdir", help=("Use this directory instead of '%s'" % pathutils.CONF_DIR), default=pathutils.CONF_DIR, dest="conf_dir") parser.add_option("--no-verify", help="Do not verify configuration after upgrade", action="store_true", dest="no_verify", default=False) parser.add_option("--downgrade", help="Downgrade to the previous stable version", action="store_true", dest="downgrade", default=False) (options, args) = parser.parse_args() # We need to keep filenames locally because they might be renamed between # versions. options.data_dir = os.path.abspath(options.data_dir) options.CONFIG_DATA_PATH = options.data_dir + "/config.data" options.SERVER_PEM_PATH = options.data_dir + "/server.pem" options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts" options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem" options.SPICE_CERT_FILE = options.data_dir + "/spice.pem" options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem" options.RAPI_USERS_FILE = options.data_dir + "/rapi/users" options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users" options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key" options.CDS_FILE = options.data_dir + "/cluster-domain-secret" options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node" options.WATCHER_STATEFILE = options.data_dir + "/watcher.data" options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths" SetupLogging() # Option checking if args: raise Error("No arguments expected") if options.downgrade and not options.no_verify: options.no_verify = True # Check master name if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname): logging.error("Aborting due to hostname mismatch") sys.exit(constants.EXIT_FAILURE) if not options.force: if options.downgrade: usertext = ("The configuration is going to be DOWNGRADED to version %s.%s" " Some configuration data might be removed if they don't fit" " in the old format. Please make sure you have read the" " upgrade notes (available in the UPGRADE file and included" " in other documentation formats) to understand what they" " are. Continue with *DOWNGRADING* the configuration?" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)) else: usertext = ("Please make sure you have read the upgrade notes for" " Ganeti %s (available in the UPGRADE file and included" " in other documentation formats). Continue with upgrading" " configuration?" % constants.RELEASE_VERSION) if not cli.AskUser(usertext): sys.exit(constants.EXIT_FAILURE) # Check whether it's a Ganeti configuration directory if not (os.path.isfile(options.CONFIG_DATA_PATH) and os.path.isfile(options.SERVER_PEM_PATH) and os.path.isfile(options.KNOWN_HOSTS_PATH)): raise Error(("%s does not seem to be a Ganeti configuration" " directory") % options.data_dir) if not os.path.isdir(options.conf_dir): raise Error("Not a directory: %s" % options.conf_dir) config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH)) try: config_version = config_data["version"] except KeyError: raise Error("Unable to determine configuration version") (config_major, config_minor, config_revision) = \ constants.SplitVersion(config_version) logging.info("Found configuration version %s (%d.%d.%d)", config_version, config_major, config_minor, config_revision) if "config_version" in config_data["cluster"]: raise Error("Inconsistent configuration: found config_version in" " configuration file") # Downgrade to the previous stable version if options.downgrade: if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or (config_major == DOWNGRADE_MAJOR and config_minor == DOWNGRADE_MINOR)): raise Error("Downgrade supported only from the latest version (%s.%s)," " found %s (%s.%s.%s) instead" % (TARGET_MAJOR, TARGET_MINOR, config_version, config_major, config_minor, config_revision)) DowngradeAll(config_data) # Upgrade from 2.{0..7} to 2.9 elif config_major == 2 and config_minor in range(0, 10): if config_revision != 0: logging.warning("Config revision is %s, not 0", config_revision) UpgradeAll(config_data) elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR: logging.info("No changes necessary") else: raise Error("Configuration version %d.%d.%d not supported by this tool" % (config_major, config_minor, config_revision)) try: logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH) utils.WriteFile(file_name=options.CONFIG_DATA_PATH, data=serializer.DumpJson(config_data), mode=0600, dry_run=options.dry_run, backup=True) if not options.dry_run: bootstrap.GenerateClusterCrypto( False, False, False, False, False, nodecert_file=options.SERVER_PEM_PATH, rapicert_file=options.RAPI_CERT_FILE, spicecert_file=options.SPICE_CERT_FILE, spicecacert_file=options.SPICE_CACERT_FILE, hmackey_file=options.CONFD_HMAC_KEY, cds_file=options.CDS_FILE) except Exception: logging.critical("Writing configuration failed. It is probably in an" " inconsistent state and needs manual intervention.") raise # test loading the config file all_ok = True if not (options.dry_run or options.no_verify): logging.info("Testing the new config file...") cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH, accept_foreign=options.ignore_hostname, offline=True) # if we reached this, it's all fine vrfy = cfg.VerifyConfig() if vrfy: logging.error("Errors after conversion:") for item in vrfy: logging.error(" - %s", item) all_ok = False else: logging.info("File loaded successfully after upgrading") del cfg if options.downgrade: action = "downgraded" out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR) else: action = "upgraded" out_ver = constants.RELEASE_VERSION if all_ok: cli.ToStderr("Configuration successfully %s to version %s.", action, out_ver) else: cli.ToStderr("Configuration %s to version %s, but there are errors." "\nPlease review the file.", action, out_ver) if __name__ == "__main__": main()