#: Target major version we will upgrade to
TARGET_MAJOR = 2
#: Target minor version we will upgrade to
-TARGET_MINOR = 7
+TARGET_MINOR = 8
+#: Target major version for downgrade
+DOWNGRADE_MAJOR = 2
+#: Target minor version for downgrade
+DOWNGRADE_MINOR = 7
class Error(Exception):
return False
+def _FillIPolicySpecs(default_ipolicy, ipolicy):
+  """Fill missing ipolicy spec parameters from the default policy.
+
+  Every parameter present in the first "minmax" entry of
+  C{default_ipolicy} but missing from the matching spec of C{ipolicy}
+  is copied over, modifying C{ipolicy} in place.
+
+  """
+  if "minmax" in ipolicy:
+    for (key, spec) in ipolicy["minmax"][0].items():
+      # Only fill in parameters the policy does not define itself
+      for (par, val) in default_ipolicy["minmax"][0][key].items():
+        if par not in spec:
+          spec[par] = val
+
+
+def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
+  """Upgrade an instance policy dict to the new "minmax" layout.
+
+  Old-style top-level "min"/"max" entries are moved into a
+  single-element "minmax" list; for group policies any "std" entry is
+  dropped, and missing spec parameters are then filled in from
+  C{default_ipolicy}.
+
+  """
+  minmax_keys = ["min", "max"]
+  if any((k in ipolicy) for k in minmax_keys):
+    minmax = {}
+    for key in minmax_keys:
+      if key in ipolicy:
+        # Keep only non-empty specs, but remove the old key either way
+        if ipolicy[key]:
+          minmax[key] = ipolicy[key]
+        del ipolicy[key]
+    if minmax:
+      ipolicy["minmax"] = [minmax]
+  if isgroup and "std" in ipolicy:
+    del ipolicy["std"]
+  _FillIPolicySpecs(default_ipolicy, ipolicy)
+
+
def UpgradeNetworks(config_data):
+  # Ensure the top-level "networks" key exists; it may be absent (or
+  # falsy) in configurations written by older versions
  networks = config_data.get("networks", None)
  if not networks:
    config_data["networks"] = {}
+def UpgradeCluster(config_data):
+  """Upgrade the cluster-level instance policy, if any.
+
+  @raise Error: if the configuration has no "cluster" entry
+
+  """
+  cluster = config_data.get("cluster", None)
+  if cluster is None:
+    raise Error("Cannot find cluster")
+  # setdefault guarantees the "ipolicy" key exists afterwards, even if
+  # only set to None
+  ipolicy = cluster.setdefault("ipolicy", None)
+  if ipolicy:
+    UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
+
+
def UpgradeGroups(config_data):
+  # Group policies are upgraded against the cluster-level policy,
+  # which must therefore exist whenever a group defines one
+  cl_ipolicy = config_data["cluster"].get("ipolicy")
  for group in config_data["nodegroups"].values():
    networks = group.get("networks", None)
    if not networks:
      group["networks"] = {}
+    ipolicy = group.get("ipolicy", None)
+    if ipolicy:
+      if cl_ipolicy is None:
+        raise Error("A group defines an instance policy but there is no"
+                    " instance policy at cluster level")
+      UpgradeIPolicy(ipolicy, cl_ipolicy, True)
+
+
+def GetExclusiveStorageValue(config_data):
+  """Return a conservative value of the exclusive_storage flag.
+
+  Return C{True} if the cluster or at least a nodegroup have the flag set.
+
+  @rtype: bool
+
+  """
+  ret = False
+  cluster = config_data["cluster"]
+  ndparams = cluster.get("ndparams")
+  if ndparams is not None and ndparams.get("exclusive_storage"):
+    ret = True
+  # A single group with the flag set is enough to make the answer True
+  for group in config_data["nodegroups"].values():
+    ndparams = group.get("ndparams")
+    if ndparams is not None and ndparams.get("exclusive_storage"):
+      ret = True
+  return ret
def UpgradeInstances(config_data):
if "instances" not in config_data:
raise Error("Can't find the 'instances' key in the configuration!")
+ missing_spindles = False
for instance, iobj in config_data["instances"].items():
for nic in iobj["nics"]:
name = nic.get("network", None)
" from '%s' to '%s'",
instance, idx, current, expected)
dobj["iv_name"] = expected
+ if not "spindles" in dobj:
+ missing_spindles = True
+
+ if GetExclusiveStorageValue(config_data) and missing_spindles:
+ # We cannot be sure that the instances that are missing spindles have
+ # exclusive storage enabled (the check would be more complicated), so we
+ # give a noncommittal message
+ logging.warning("Some instance disks could be needing to update the"
+ " spindles parameter; you can check by running"
+ " 'gnt-cluster verify', and fix any problem with"
+ " 'gnt-cluster repair-disk-sizes'")
def UpgradeRapiUsers():
backup=True)
+def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
+  """Translate a node key into its new form.
+
+  If C{old_key} is not found, it is assumed to be already translated
+  and is returned unchanged (with a warning); otherwise the value of
+  the node's C{new_key_field} field is returned.
+
+  """
+  if old_key not in nodes_by_old_key:
+    logging.warning("Can't find node '%s' in configuration, assuming that it's"
+                    " already up-to-date", old_key)
+    return old_key
+  return nodes_by_old_key[old_key][new_key_field]
+
+
+def ChangeNodeIndices(config_data, old_key_field, new_key_field):
+  """Re-key all node references in the configuration.
+
+  The "nodes" dict is re-indexed from C{old_key_field} to
+  C{new_key_field} (e.g. name -> uuid on upgrade), and the cluster's
+  master node, each instance's primary node and the node references
+  inside DRBD disk logical_ids are translated accordingly.
+
+  """
+  def ChangeDiskNodeIndices(disk):
+    # Only DRBD disks reference nodes: the first two logical_id
+    # entries hold the node keys of the mirror and are translated here
+    if disk["dev_type"] in constants.LDS_DRBD:
+      for i in range(0, 2):
+        disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
+                                                disk["logical_id"][i],
+                                                new_key_field)
+    # Recurse into child disks (e.g. the components of a composite disk)
+    if "children" in disk:
+      for child in disk["children"]:
+        ChangeDiskNodeIndices(child)
+
+  nodes_by_old_key = {}
+  nodes_by_new_key = {}
+  for (_, node) in config_data["nodes"].items():
+    nodes_by_old_key[node[old_key_field]] = node
+    nodes_by_new_key[node[new_key_field]] = node
+
+  config_data["nodes"] = nodes_by_new_key
+
+  cluster = config_data["cluster"]
+  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
+                                           cluster["master_node"],
+                                           new_key_field)
+
+  for inst in config_data["instances"].values():
+    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
+                                           inst["primary_node"],
+                                           new_key_field)
+    for disk in inst["disks"]:
+      ChangeDiskNodeIndices(disk)
+
+
+def UpgradeNodeIndices(config_data):
+  """Switch all node references from names to UUIDs."""
+  ChangeNodeIndices(config_data, "name", "uuid")
+
+
def UpgradeAll(config_data):
+  # Stamp the configuration with the target version number
  config_data["version"] = constants.BuildVersion(TARGET_MAJOR,
                                                  TARGET_MINOR, 0)
  UpgradeWatcher()
  UpgradeFileStoragePaths(config_data)
  UpgradeNetworks(config_data)
+  # The cluster-level ipolicy must be upgraded before the group
+  # policies, as UpgradeGroups reads it to fill in group defaults
+  UpgradeCluster(config_data)
  UpgradeGroups(config_data)
  UpgradeInstances(config_data)
+  UpgradeNodeIndices(config_data)
+
+
+def DowngradeDisks(disks, owner):
+  """Remove disk parameters unknown to the downgrade target version.
+
+  @param owner: name of the instance owning the disks, used only in
+    the warning messages
+
+  """
+  for disk in disks:
+    # Remove "spindles", which the downgrade target version does not
+    # support; the value is logged before being discarded
+    if "spindles" in disk:
+      logging.warning("Removing spindles (value=%s) from disk %s (%s) of"
+                      " instance %s",
+                      disk["spindles"], disk["iv_name"], disk["uuid"], owner)
+      del disk["spindles"]
+
+
+def DowngradeInstances(config_data):
+  """Downgrade the disks of every instance.
+
+  @raise Error: if the "instances" key or an instance's "disks" key is
+    missing
+
+  """
+  if "instances" not in config_data:
+    raise Error("Cannot find the 'instances' key in the configuration!")
+  for (iname, iobj) in config_data["instances"].items():
+    if "disks" not in iobj:
+      raise Error("Cannot find 'disks' key for instance %s" % iname)
+    DowngradeDisks(iobj["disks"], iname)
+
+
+def DowngradeNodeIndices(config_data):
+  """Switch all node references back from UUIDs to names."""
+  ChangeNodeIndices(config_data, "uuid", "name")
+
+
+def DowngradeAll(config_data):
+  """Convert the configuration back to the previous stable version."""
+  # Any code specific to a particular version should be labeled that way, so
+  # it can be removed when updating to the next version.
+  config_data["version"] = constants.BuildVersion(DOWNGRADE_MAJOR,
+                                                  DOWNGRADE_MINOR, 0)
+  DowngradeInstances(config_data)
+  DowngradeNodeIndices(config_data)
def main():
parser.add_option("--no-verify",
help="Do not verify configuration after upgrade",
action="store_true", dest="no_verify", default=False)
+ parser.add_option("--downgrade",
+ help="Downgrade to the previous stable version",
+ action="store_true", dest="downgrade", default=False)
(options, args) = parser.parse_args()
# We need to keep filenames locally because they might be renamed between
# Option checking
if args:
raise Error("No arguments expected")
+ if options.downgrade and not options.no_verify:
+ options.no_verify = True
# Check master name
if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
sys.exit(constants.EXIT_FAILURE)
if not options.force:
- usertext = ("Please make sure you have read the upgrade notes for"
- " Ganeti %s (available in the UPGRADE file and included"
- " in other documentation formats). Continue with upgrading"
- " configuration?" % constants.RELEASE_VERSION)
+ if options.downgrade:
+ usertext = ("The configuration is going to be DOWNGRADED to version %s.%s"
+ " Some configuration data might be removed if they don't fit"
+ " in the old format. Please make sure you have read the"
+ " upgrade notes (available in the UPGRADE file and included"
+ " in other documentation formats) to understand what they"
+ " are. Continue with *DOWNGRADING* the configuration?" %
+ (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
+ else:
+ usertext = ("Please make sure you have read the upgrade notes for"
+ " Ganeti %s (available in the UPGRADE file and included"
+ " in other documentation formats). Continue with upgrading"
+ " configuration?" % constants.RELEASE_VERSION)
if not cli.AskUser(usertext):
sys.exit(constants.EXIT_FAILURE)
raise Error("Inconsistent configuration: found config_version in"
" configuration file")
- # Upgrade from 2.{0..6} to 2.7
- if config_major == 2 and config_minor in (0, 1, 2, 3, 4, 5, 6):
+ # Downgrade to the previous stable version
+ if options.downgrade:
+ if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
+ (config_major == DOWNGRADE_MAJOR and
+ config_minor == DOWNGRADE_MINOR)):
+ raise Error("Downgrade supported only from the latest version (%s.%s),"
+ " found %s (%s.%s.%s) instead" %
+ (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
+ config_minor, config_revision))
+ DowngradeAll(config_data)
+
+ # Upgrade from 2.{0..7} to 2.8
+ elif config_major == 2 and config_minor in range(0, 9):
if config_revision != 0:
logging.warning("Config revision is %s, not 0", config_revision)
UpgradeAll(config_data)
logging.info("File loaded successfully after upgrading")
del cfg
+ if options.downgrade:
+ action = "downgraded"
+ out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
+ else:
+ action = "upgraded"
+ out_ver = constants.RELEASE_VERSION
if all_ok:
- cli.ToStderr("Configuration successfully upgraded to version %s.",
- constants.RELEASE_VERSION)
+ cli.ToStderr("Configuration successfully %s to version %s.",
+ action, out_ver)
else:
- cli.ToStderr("Configuration upgraded to version %s, but there are errors."
- "\nPlease review the file.", constants.RELEASE_VERSION)
+ cli.ToStderr("Configuration %s to version %s, but there are errors."
+ "\nPlease review the file.", action, out_ver)
if __name__ == "__main__":