#!/usr/bin/python
#
-# Copyright (C) 2007 Google Inc.
+# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
"""Tool to upgrade the configuration file.
-The upgrade is done by unpickling the configuration file into custom classes
-derivating from dict. We then update the configuration by modifying these
-dicts. To save the configuration, it's pickled into a buffer and unpickled
-again using the Ganeti objects before being finally pickled into a file.
-
-Not using the custom classes wouldn't allow us to rename or remove attributes
-between versions without loosing their values.
+This code handles only the types supported by simplejson. For
+example, a 'set' is represented as a 'list'.
"""
import os.path
import sys
import optparse
-import cPickle
-import tempfile
+import logging
+import time
from cStringIO import StringIO
-from ganeti import objects
-
-class Error(Exception):
- """Generic exception"""
- pass
-
-
-def _BaseFindGlobal(module, name):
- """Helper function for the other FindGlobal functions.
+from ganeti import constants
+from ganeti import serializer
+from ganeti import utils
+from ganeti import cli
+from ganeti import bootstrap
+from ganeti import config
+from ganeti import netutils
+from ganeti import pathutils
- """
- return getattr(sys.modules[module], name)
+from ganeti.utils import version
-# Internal config representation
-class UpgradeDict(dict):
- """Base class for internal config classes.
-
- """
- def __setstate__(self, state):
- self.update(state)
+options = None
+args = None
- def __getstate__(self):
- return self.copy()
+#: Target major version we will upgrade to
+TARGET_MAJOR = 2
+#: Target minor version we will upgrade to
+TARGET_MINOR = 10
+#: Target major version for downgrade
+DOWNGRADE_MAJOR = 2
+#: Target minor version for downgrade
+DOWNGRADE_MINOR = 9
-class UpgradeConfigData(UpgradeDict): pass
-class UpgradeCluster(UpgradeDict): pass
-class UpgradeNode(UpgradeDict): pass
-class UpgradeInstance(UpgradeDict): pass
-class UpgradeDisk(UpgradeDict): pass
-class UpgradeNIC(UpgradeDict): pass
-class UpgradeOS(UpgradeDict): pass
+# map of legacy device types
+# (mapping differing old LD_* constants to new DT_* constants)
+DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
+# (mapping differing new DT_* constants to old LD_* constants)
+DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())
-_ClassMap = {
- objects.ConfigData: UpgradeConfigData,
- objects.Cluster: UpgradeCluster,
- objects.Node: UpgradeNode,
- objects.Instance: UpgradeInstance,
- objects.Disk: UpgradeDisk,
- objects.NIC: UpgradeNIC,
- objects.OS: UpgradeOS,
-}
-
-# Build mapping dicts
-WriteMapping = dict()
-ReadMapping = dict()
-for key, value in _ClassMap.iteritems():
- WriteMapping[value.__name__] = key
- ReadMapping[key.__name__] = value
+class Error(Exception):
+ """Generic exception"""
+ pass
-# Read config
-def _ReadFindGlobal(module, name):
- """Wraps Ganeti config classes to internal ones.
+def SetupLogging():
+ """Configures the logging module.
"""
- if module == "ganeti.objects" and name in ReadMapping:
- return ReadMapping[name]
-
- return _BaseFindGlobal(module, name)
-
-
-def ReadConfig(path):
- """Reads configuration file.
+ formatter = logging.Formatter("%(asctime)s: %(message)s")
+
+ stderr_handler = logging.StreamHandler()
+ stderr_handler.setFormatter(formatter)
+ if options.debug:
+ stderr_handler.setLevel(logging.NOTSET)
+ elif options.verbose:
+ stderr_handler.setLevel(logging.INFO)
+ else:
+ stderr_handler.setLevel(logging.WARNING)
- """
- f = open(path, 'r')
- try:
- loader = cPickle.Unpickler(f)
- loader.find_global = _ReadFindGlobal
- data = loader.load()
- finally:
- f.close()
+ root_logger = logging.getLogger("")
+ root_logger.setLevel(logging.NOTSET)
+ root_logger.addHandler(stderr_handler)
- return data
+def CheckHostname(path):
+ """Ensures hostname matches ssconf value.
-# Write config
-def _WriteFindGlobal(module, name):
- """Maps our internal config classes to Ganeti's.
+ @param path: Path to ssconf file
"""
- if module == "__main__" and name in WriteMapping:
- return WriteMapping[name]
-
- return _BaseFindGlobal(module, name)
-
-
-def WriteConfig(path, data, dry_run):
- """Writes the configuration file.
+ ssconf_master_node = utils.ReadOneLineFile(path)
+ hostname = netutils.GetHostname().name
+
+ if ssconf_master_node == hostname:
+ return True
+
+ logging.warning("Warning: ssconf says master node is '%s', but this"
+ " machine's name is '%s'; this tool must be run on"
+ " the master node", ssconf_master_node, hostname)
+ return False
+
+
+def _FillIPolicySpecs(default_ipolicy, ipolicy):
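+  """Fills missing ipolicy min/max spec values from the defaults."""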
+ if "minmax" in ipolicy:
+ for (key, spec) in ipolicy["minmax"][0].items():
+ for (par, val) in default_ipolicy["minmax"][0][key].items():
+ if par not in spec:
+ spec[par] = val
+
+
+def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
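+  """Converts an old-style ipolicy to the new 'minmax' format."""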
+ minmax_keys = ["min", "max"]
+ if any((k in ipolicy) for k in minmax_keys):
+ minmax = {}
+ for key in minmax_keys:
+ if key in ipolicy:
+ if ipolicy[key]:
+ minmax[key] = ipolicy[key]
+ del ipolicy[key]
+ if minmax:
+ ipolicy["minmax"] = [minmax]
+ if isgroup and "std" in ipolicy:
+ del ipolicy["std"]
+ _FillIPolicySpecs(default_ipolicy, ipolicy)
+
+
+def UpgradeNetworks(config_data):
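+  """Ensures the configuration has a 'networks' entry."""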
+ networks = config_data.get("networks", None)
+ if not networks:
+ config_data["networks"] = {}
+
+
+def UpgradeCluster(config_data):
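+  """Upgrades cluster-level settings, currently the instance policy."""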
+ cluster = config_data.get("cluster", None)
+ if cluster is None:
+ raise Error("Cannot find cluster")
+ ipolicy = cluster.setdefault("ipolicy", None)
+ if ipolicy:
+ UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
+
+
+def UpgradeGroups(config_data):
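+  """Upgrades node group networks and instance policies."""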
+ cl_ipolicy = config_data["cluster"].get("ipolicy")
+ for group in config_data["nodegroups"].values():
+ networks = group.get("networks", None)
+ if not networks:
+ group["networks"] = {}
+ ipolicy = group.get("ipolicy", None)
+ if ipolicy:
+ if cl_ipolicy is None:
+ raise Error("A group defines an instance policy but there is no"
+ " instance policy at cluster level")
+ UpgradeIPolicy(ipolicy, cl_ipolicy, True)
+
+
+def GetExclusiveStorageValue(config_data):
+ """Return a conservative value of the exclusive_storage flag.
+
+  Return C{True} if the cluster or at least one nodegroup has the flag set.
"""
- buf = StringIO()
-
- # Write intermediate representation
- dumper = cPickle.Pickler(buf, cPickle.HIGHEST_PROTOCOL)
- dumper.dump(data)
- del dumper
-
- # Convert back to Ganeti objects
- buf.seek(0)
- loader = cPickle.Unpickler(buf)
- loader.find_global = _WriteFindGlobal
- data = loader.load()
-
- # Write target file
- (fd, name) = tempfile.mkstemp(dir=os.path.dirname(path))
- f = os.fdopen(fd, 'w')
- try:
- try:
- dumper = cPickle.Pickler(f, cPickle.HIGHEST_PROTOCOL)
- dumper.dump(data)
- f.flush()
- if dry_run:
- os.unlink(name)
- else:
- os.rename(name, path)
- except:
- os.unlink(name)
- raise
- finally:
- f.close()
-
-
-def UpdateFromVersion2To3(cfg):
- """Updates the configuration from version 2 to 3.
-
+ ret = False
+ cluster = config_data["cluster"]
+ ndparams = cluster.get("ndparams")
+ if ndparams is not None and ndparams.get("exclusive_storage"):
+ ret = True
+ for group in config_data["nodegroups"].values():
+ ndparams = group.get("ndparams")
+ if ndparams is not None and ndparams.get("exclusive_storage"):
+ ret = True
+ return ret
+
+
+def RemovePhysicalId(disk):
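+  """Removes the 'physical_id' field from a disk and its children."""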
+ if "children" in disk:
+ for d in disk["children"]:
+ RemovePhysicalId(d)
+ if "physical_id" in disk:
+ del disk["physical_id"]
+
+
+def ChangeDiskDevType(disk, dev_type_map):
+ """Replaces disk's dev_type attributes according to the given map.
+
+ This can be used for both, up or downgrading the disks.
"""
- if cfg['cluster']['config_version'] != 2:
- return
-
- # Add port pool
- if 'tcpudp_port_pool' not in cfg['cluster']:
- cfg['cluster']['tcpudp_port_pool'] = set()
+ if disk["dev_type"] in dev_type_map:
+ disk["dev_type"] = dev_type_map[disk["dev_type"]]
+ if "children" in disk:
+ for child in disk["children"]:
+ ChangeDiskDevType(child, dev_type_map)
+
+
+def UpgradeDiskDevType(disk):
+ """Upgrades the disks' device type."""
+ ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)
+
+
+def UpgradeInstances(config_data):
+ """Upgrades the instances' configuration."""
+
+ network2uuid = dict((n["name"], n["uuid"])
+ for n in config_data["networks"].values())
+ if "instances" not in config_data:
+ raise Error("Can't find the 'instances' key in the configuration!")
+
+ missing_spindles = False
+ for instance, iobj in config_data["instances"].items():
+ for nic in iobj["nics"]:
+ name = nic.get("network", None)
+ if name:
+ uuid = network2uuid.get(name, None)
+ if uuid:
+ print("NIC with network name %s found."
+ " Substituting with uuid %s." % (name, uuid))
+ nic["network"] = uuid
+
+ if "disks" not in iobj:
+ raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
+ disks = iobj["disks"]
+ for idx, dobj in enumerate(disks):
+ RemovePhysicalId(dobj)
+
+ expected = "disk/%s" % idx
+ current = dobj.get("iv_name", "")
+ if current != expected:
+ logging.warning("Updating iv_name for instance %s/disk %s"
+ " from '%s' to '%s'",
+ instance, idx, current, expected)
+ dobj["iv_name"] = expected
+
+ if "dev_type" in dobj:
+ UpgradeDiskDevType(dobj)
+
+ if not "spindles" in dobj:
+ missing_spindles = True
+
+ if GetExclusiveStorageValue(config_data) and missing_spindles:
+ # We cannot be sure that the instances that are missing spindles have
+ # exclusive storage enabled (the check would be more complicated), so we
+ # give a noncommittal message
+ logging.warning("Some instance disks could be needing to update the"
+ " spindles parameter; you can check by running"
+ " 'gnt-cluster verify', and fix any problem with"
+ " 'gnt-cluster repair-disk-sizes'")
+
+
+def UpgradeRapiUsers():
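+  """Moves the pre-2.4 RAPI users file and creates a compatibility symlink."""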
+ if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
+ not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
+ if os.path.exists(options.RAPI_USERS_FILE):
+ raise Error("Found pre-2.4 RAPI users file at %s, but another file"
+ " already exists at %s" %
+ (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
+ logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
+ options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
+ if not options.dry_run:
+ utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
+ mkdir=True, mkdir_mode=0750)
+
+ # Create a symlink for RAPI users file
+ if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
+ os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
+ os.path.isfile(options.RAPI_USERS_FILE)):
+ logging.info("Creating symlink from %s to %s",
+ options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
+ if not options.dry_run:
+ os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)
+
+
+def UpgradeWatcher():
+ # Remove old watcher state file if it exists
+ if os.path.exists(options.WATCHER_STATEFILE):
+ logging.info("Removing watcher state file %s", options.WATCHER_STATEFILE)
+ if not options.dry_run:
+ utils.RemoveFile(options.WATCHER_STATEFILE)
+
+
+def UpgradeFileStoragePaths(config_data):
+ # Write file storage paths
+ if not os.path.exists(options.FILE_STORAGE_PATHS_FILE):
+ cluster = config_data["cluster"]
+ file_storage_dir = cluster.get("file_storage_dir")
+ shared_file_storage_dir = cluster.get("shared_file_storage_dir")
+ del cluster
+
+ logging.info("Ganeti 2.7 and later only allow whitelisted directories"
+ " for file storage; writing existing configuration values"
+ " into '%s'",
+ options.FILE_STORAGE_PATHS_FILE)
+
+ if file_storage_dir:
+ logging.info("File storage directory: %s", file_storage_dir)
+ if shared_file_storage_dir:
+ logging.info("Shared file storage directory: %s",
+ shared_file_storage_dir)
+
+ buf = StringIO()
+ buf.write("# List automatically generated from configuration by\n")
+ buf.write("# cfgupgrade at %s\n" % time.asctime())
+ if file_storage_dir:
+ buf.write("%s\n" % file_storage_dir)
+ if shared_file_storage_dir:
+ buf.write("%s\n" % shared_file_storage_dir)
+ utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
+ data=buf.getvalue(),
+ mode=0600,
+ dry_run=options.dry_run,
+ backup=True)
+
+
+def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
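+  """Returns the node's new key, or the old one if the node is unknown."""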
+ if old_key not in nodes_by_old_key:
+ logging.warning("Can't find node '%s' in configuration, assuming that it's"
+ " already up-to-date", old_key)
+ return old_key
+ return nodes_by_old_key[old_key][new_key_field]
+
+
+def ChangeNodeIndices(config_data, old_key_field, new_key_field):
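+  """Re-keys the nodes dict and updates all node references."""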
+ def ChangeDiskNodeIndices(disk):
+    # Note: 'drbd8' is a legacy device type from before 2.9; it needs to be
+    # considered whenever an upgrade or downgrade path crosses version 2.9.
+ drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD
+ if disk["dev_type"] in drbd_disk_types:
+ for i in range(0, 2):
+ disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
+ disk["logical_id"][i],
+ new_key_field)
+ if "children" in disk:
+ for child in disk["children"]:
+ ChangeDiskNodeIndices(child)
+
+ nodes_by_old_key = {}
+ nodes_by_new_key = {}
+ for (_, node) in config_data["nodes"].items():
+ nodes_by_old_key[node[old_key_field]] = node
+ nodes_by_new_key[node[new_key_field]] = node
+
+ config_data["nodes"] = nodes_by_new_key
+
+ cluster = config_data["cluster"]
+ cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
+ cluster["master_node"],
+ new_key_field)
+
+ for inst in config_data["instances"].values():
+ inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
+ inst["primary_node"],
+ new_key_field)
+ for disk in inst["disks"]:
+ ChangeDiskNodeIndices(disk)
+
+
+def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
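+  """Re-keys the instances dict by the given field."""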
+ insts_by_old_key = {}
+ insts_by_new_key = {}
+ for (_, inst) in config_data["instances"].items():
+ insts_by_old_key[inst[old_key_field]] = inst
+ insts_by_new_key[inst[new_key_field]] = inst
+
+ config_data["instances"] = insts_by_new_key
+
+
+def UpgradeNodeIndices(config_data):
+ ChangeNodeIndices(config_data, "name", "uuid")
+
+
+def UpgradeInstanceIndices(config_data):
+ ChangeInstanceIndices(config_data, "name", "uuid")
+
+
+def UpgradeAll(config_data):
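+  """Bumps the configuration version and runs all upgrade steps."""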
+ config_data["version"] = version.BuildVersion(TARGET_MAJOR, TARGET_MINOR, 0)
+ UpgradeRapiUsers()
+ UpgradeWatcher()
+ UpgradeFileStoragePaths(config_data)
+ UpgradeNetworks(config_data)
+ UpgradeCluster(config_data)
+ UpgradeGroups(config_data)
+ UpgradeInstances(config_data)
+ UpgradeNodeIndices(config_data)
+ UpgradeInstanceIndices(config_data)
+
+
+def DowngradeInstances(config_data):
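+  """Applies per-instance downgrades, currently NIC VLAN removal."""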
+ if "instances" not in config_data:
+ raise Error("Cannot find the 'instances' key in the configuration!")
+ for (iname, iobj) in config_data["instances"].items():
+ DowngradeNicParamsVLAN(iobj["nics"], iname)
+
+
+def DowngradeNicParamsVLAN(nics, owner):
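+  """Drops the 'vlan' entry from the given NICs' nicparams."""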
+ for nic in nics:
+ vlan = nic["nicparams"].get("vlan", None)
+ if vlan:
+ logging.warning("Instance with name %s found. Removing VLAN information"
+ " %s.", owner, vlan)
+ del nic["nicparams"]["vlan"]
+
+
+def DowngradeAll(config_data):
+ # Any code specific to a particular version should be labeled that way, so
+ # it can be removed when updating to the next version.
+ config_data["version"] = version.BuildVersion(DOWNGRADE_MAJOR,
+ DOWNGRADE_MINOR, 0)
+ DowngradeInstances(config_data)
- # Add bridge settings
- if 'default_bridge' not in cfg['cluster']:
- cfg['cluster']['default_bridge'] = 'xen-br0'
- for inst in cfg['instances'].values():
- for nic in inst['nics']:
- if 'bridge' not in nic:
- nic['bridge'] = None
- cfg['cluster']['config_version'] = 3
+def main():
+ """Main program.
+ """
+ global options, args # pylint: disable=W0603
-# Main program
-if __name__ == "__main__":
# Option parsing
- parser = optparse.OptionParser()
- parser.add_option('--dry-run', dest='dry_run',
- action="store_true",
- help="Try to do the conversion, but don't write "
- "output file")
- parser.add_option('--verbose', dest='verbose',
+ parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
+ parser.add_option("--dry-run", dest="dry_run",
action="store_true",
- help="Verbose output")
+ help="Try to do the conversion, but don't write"
+ " output file")
+ parser.add_option(cli.FORCE_OPT)
+ parser.add_option(cli.DEBUG_OPT)
+ parser.add_option(cli.VERBOSE_OPT)
+ parser.add_option("--ignore-hostname", dest="ignore_hostname",
+ action="store_true", default=False,
+ help="Don't abort if hostname doesn't match")
+ parser.add_option("--path", help="Convert configuration in this"
+ " directory instead of '%s'" % pathutils.DATA_DIR,
+ default=pathutils.DATA_DIR, dest="data_dir")
+ parser.add_option("--confdir",
+ help=("Use this directory instead of '%s'" %
+ pathutils.CONF_DIR),
+ default=pathutils.CONF_DIR, dest="conf_dir")
+ parser.add_option("--no-verify",
+ help="Do not verify configuration after upgrade",
+ action="store_true", dest="no_verify", default=False)
+ parser.add_option("--downgrade",
+ help="Downgrade to the previous stable version",
+ action="store_true", dest="downgrade", default=False)
(options, args) = parser.parse_args()
+ # We need to keep filenames locally because they might be renamed between
+ # versions.
+ options.data_dir = os.path.abspath(options.data_dir)
+ options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
+ options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
+ options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
+ options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
+ options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
+ options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
+ options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
+ options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
+ options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
+ options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
+ options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
+ options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
+ options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"
+
+ SetupLogging()
+
# Option checking
if args:
- cfg_file = args[0]
- else:
- raise Error("Configuration file not specified")
+ raise Error("No arguments expected")
+ if options.downgrade and not options.no_verify:
+ options.no_verify = True
+
+ # Check master name
+ if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
+ logging.error("Aborting due to hostname mismatch")
+ sys.exit(constants.EXIT_FAILURE)
+
+ if not options.force:
+ if options.downgrade:
+ usertext = ("The configuration is going to be DOWNGRADED to version %s.%s"
+ " Some configuration data might be removed if they don't fit"
+ " in the old format. Please make sure you have read the"
+ " upgrade notes (available in the UPGRADE file and included"
+ " in other documentation formats) to understand what they"
+ " are. Continue with *DOWNGRADING* the configuration?" %
+ (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
+ else:
+ usertext = ("Please make sure you have read the upgrade notes for"
+ " Ganeti %s (available in the UPGRADE file and included"
+ " in other documentation formats). Continue with upgrading"
+ " configuration?" % constants.RELEASE_VERSION)
+ if not cli.AskUser(usertext):
+ sys.exit(constants.EXIT_FAILURE)
+
+ # Check whether it's a Ganeti configuration directory
+ if not (os.path.isfile(options.CONFIG_DATA_PATH) and
+ os.path.isfile(options.SERVER_PEM_PATH) and
+ os.path.isfile(options.KNOWN_HOSTS_PATH)):
+ raise Error(("%s does not seem to be a Ganeti configuration"
+ " directory") % options.data_dir)
+
+ if not os.path.isdir(options.conf_dir):
+ raise Error("Not a directory: %s" % options.conf_dir)
+
+ config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))
- config = ReadConfig(cfg_file)
+ try:
+ config_version = config_data["version"]
+ except KeyError:
+ raise Error("Unable to determine configuration version")
+
+ (config_major, config_minor, config_revision) = \
+ version.SplitVersion(config_version)
+
+ logging.info("Found configuration version %s (%d.%d.%d)",
+ config_version, config_major, config_minor, config_revision)
+
+ if "config_version" in config_data["cluster"]:
+ raise Error("Inconsistent configuration: found config_version in"
+ " configuration file")
+
+ # Downgrade to the previous stable version
+ if options.downgrade:
+ if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
+ (config_major == DOWNGRADE_MAJOR and
+ config_minor == DOWNGRADE_MINOR)):
+ raise Error("Downgrade supported only from the latest version (%s.%s),"
+ " found %s (%s.%s.%s) instead" %
+ (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
+ config_minor, config_revision))
+ DowngradeAll(config_data)
+
+ # Upgrade from 2.{0..9} to 2.10
+ elif config_major == 2 and config_minor in range(0, 10):
+ if config_revision != 0:
+ logging.warning("Config revision is %s, not 0", config_revision)
+ UpgradeAll(config_data)
+
+ elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
+ logging.info("No changes necessary")
- UpdateFromVersion2To3(config)
+ else:
+ raise Error("Configuration version %d.%d.%d not supported by this tool" %
+ (config_major, config_minor, config_revision))
- if options.verbose:
- import pprint
- pprint.pprint(config)
+ try:
+ logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
+ utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
+ data=serializer.DumpJson(config_data),
+ mode=0600,
+ dry_run=options.dry_run,
+ backup=True)
+
+ if not options.dry_run:
+ bootstrap.GenerateClusterCrypto(
+ False, False, False, False, False,
+ nodecert_file=options.SERVER_PEM_PATH,
+ rapicert_file=options.RAPI_CERT_FILE,
+ spicecert_file=options.SPICE_CERT_FILE,
+ spicecacert_file=options.SPICE_CACERT_FILE,
+ hmackey_file=options.CONFD_HMAC_KEY,
+ cds_file=options.CDS_FILE)
+
+ except Exception:
+ logging.critical("Writing configuration failed. It is probably in an"
+ " inconsistent state and needs manual intervention.")
+ raise
+
+ # test loading the config file
+ all_ok = True
+ if not (options.dry_run or options.no_verify):
+ logging.info("Testing the new config file...")
+ cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
+ accept_foreign=options.ignore_hostname,
+ offline=True)
+ # if we reached this, it's all fine
+ vrfy = cfg.VerifyConfig()
+ if vrfy:
+ logging.error("Errors after conversion:")
+ for item in vrfy:
+ logging.error(" - %s", item)
+ all_ok = False
+ else:
+ logging.info("File loaded successfully after upgrading")
+ del cfg
+
+ if options.downgrade:
+ action = "downgraded"
+ out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
+ else:
+ action = "upgraded"
+ out_ver = constants.RELEASE_VERSION
+ if all_ok:
+ cli.ToStderr("Configuration successfully %s to version %s.",
+ action, out_ver)
+ else:
+ cli.ToStderr("Configuration %s to version %s, but there are errors."
+ "\nPlease review the file.", action, out_ver)
- WriteConfig(cfg_file, config, options.dry_run)
+
+if __name__ == "__main__":
+ main()