UpgradeIPolicy(ipolicy, cl_ipolicy, True)
+def GetExclusiveStorageValue(config_data):
+  """Return a conservative value of the exclusive_storage flag.
+
+  Return C{True} if the cluster or at least a nodegroup have the flag set.
+
+  @type config_data: dict
+  @param config_data: the deserialized configuration dictionary
+  @rtype: bool
+  @return: C{True} if any ndparams dict sets "exclusive_storage" truthy
+
+  """
+  ret = False
+  # Cluster-wide ndparams may be missing in the configuration being upgraded
+  cluster = config_data["cluster"]
+  ndparams = cluster.get("ndparams")
+  if ndparams is not None and ndparams.get("exclusive_storage"):
+    ret = True
+  # A single nodegroup with the flag set is enough for the conservative answer
+  for group in config_data["nodegroups"].values():
+    ndparams = group.get("ndparams")
+    if ndparams is not None and ndparams.get("exclusive_storage"):
+      ret = True
+  return ret
+
+
def UpgradeInstances(config_data):
network2uuid = dict((n["name"], n["uuid"])
for n in config_data["networks"].values())
if "instances" not in config_data:
raise Error("Can't find the 'instances' key in the configuration!")
+ missing_spindles = False
for instance, iobj in config_data["instances"].items():
for nic in iobj["nics"]:
name = nic.get("network", None)
" from '%s' to '%s'",
instance, idx, current, expected)
dobj["iv_name"] = expected
+ if not "spindles" in dobj:
+ missing_spindles = True
+
+ if GetExclusiveStorageValue(config_data) and missing_spindles:
+ # We cannot be sure that the instances that are missing spindles have
+ # exclusive storage enabled (the check would be more complicated), so we
+ # give a noncommittal message
+ logging.warning("Some instance disks could be needing to update the"
+ " spindles parameter; you can check by running"
+ " 'gnt-cluster verify', and fix any problem with"
+ " 'gnt-cluster repair-disk-sizes'")
def UpgradeRapiUsers():
backup=True)
+def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
+  """Translate a node's old configuration key into the new one.
+
+  @type nodes_by_old_key: dict
+  @param nodes_by_old_key: map from a node's old key to its node dict
+  @param old_key: the node key to translate
+  @type new_key_field: string
+  @param new_key_field: name of the field holding the new key in a node dict
+  @return: the value of C{new_key_field} for the matching node; if the node
+    is unknown, C{old_key} is returned unchanged (assumed already converted)
+
+  """
+  if old_key not in nodes_by_old_key:
+    logging.warning("Can't find node '%s' in configuration, assuming that it's"
+                    " already up-to-date", old_key)
+    return old_key
+  return nodes_by_old_key[old_key][new_key_field]
+
+
+def ChangeNodeIndices(config_data, old_key_field, new_key_field):
+  """Re-key every node reference in the configuration.
+
+  Rewrites the "nodes" dict of C{config_data} to be indexed by
+  C{new_key_field} instead of C{old_key_field}, and translates all node
+  references accordingly: the cluster's master node, each instance's
+  primary node, and the node entries inside DRBD disk logical ids.
+
+  @type config_data: dict
+  @param config_data: the deserialized configuration dictionary
+  @type old_key_field: string
+  @param old_key_field: node field currently used as the key (e.g. "name")
+  @type new_key_field: string
+  @param new_key_field: node field to use as the new key (e.g. "uuid")
+
+  """
+  def ChangeDiskNodeIndices(disk):
+    # Only DRBD disks carry node references; the code below treats the
+    # first two logical_id elements as node keys and translates them
+    if disk["dev_type"] in constants.LDS_DRBD:
+      for i in range(0, 2):
+        disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
+                                                disk["logical_id"][i],
+                                                new_key_field)
+    # Recurse into nested child disks, if any
+    if "children" in disk:
+      for child in disk["children"]:
+        ChangeDiskNodeIndices(child)
+
+  # Build both directions up front: old key -> node for translation,
+  # new key -> node to become the new "nodes" dict
+  nodes_by_old_key = {}
+  nodes_by_new_key = {}
+  for (_, node) in config_data["nodes"].items():
+    nodes_by_old_key[node[old_key_field]] = node
+    nodes_by_new_key[node[new_key_field]] = node
+
+  config_data["nodes"] = nodes_by_new_key
+
+  cluster = config_data["cluster"]
+  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
+                                           cluster["master_node"],
+                                           new_key_field)
+
+  for inst in config_data["instances"].values():
+    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
+                                           inst["primary_node"],
+                                           new_key_field)
+    for disk in inst["disks"]:
+      ChangeDiskNodeIndices(disk)
+
+
+def UpgradeNodeIndices(config_data):
+  """Change the "nodes" dict to be keyed by node UUID instead of name.
+
+  """
+  ChangeNodeIndices(config_data, "name", "uuid")
+
+
def UpgradeAll(config_data):
+  """Run all upgrade steps on the configuration dict, in place.
+
+  """
  config_data["version"] = constants.BuildVersion(TARGET_MAJOR,
                                                  TARGET_MINOR, 0)
  UpgradeCluster(config_data)
  UpgradeGroups(config_data)
  UpgradeInstances(config_data)
+  UpgradeNodeIndices(config_data)
-def DowngradeIPolicy(ipolicy, owner):
- # Downgrade IPolicy to 2.7 (stable)
- minmax_keys = ["min", "max"]
- specs_is_split = any((k in ipolicy) for k in minmax_keys)
- if not specs_is_split:
- if "minmax" in ipolicy:
- if type(ipolicy["minmax"]) is not list:
- raise Error("Invalid minmax type in %s ipolicy: %s" %
- (owner, type(ipolicy["minmax"])))
- if len(ipolicy["minmax"]) > 1:
- logging.warning("Discarding some limit specs values from %s policy",
- owner)
- minmax = ipolicy["minmax"][0]
- del ipolicy["minmax"]
- else:
- minmax = {}
- for key in minmax_keys:
- spec = minmax.get(key, {})
- ipolicy[key] = spec
- if "std" not in ipolicy:
- ipolicy["std"] = {}
-
-
-def DowngradeGroups(config_data):
- for group in config_data["nodegroups"].values():
- ipolicy = group.get("ipolicy", None)
- if ipolicy is not None:
- DowngradeIPolicy(ipolicy, "group \"%s\"" % group.get("name"))
-
-
-def DowngradeEnabledTemplates(cluster):
- # Remove enabled disk templates to downgrade to 2.7
- edt_key = "enabled_disk_templates"
- if edt_key in cluster:
- logging.warning("Removing cluster's enabled disk templates; value = %s",
- utils.CommaJoin(cluster[edt_key]))
- del cluster[edt_key]
-
-
-def DowngradeCluster(config_data):
- cluster = config_data.get("cluster", None)
- if cluster is None:
- raise Error("Cannot find cluster")
- DowngradeEnabledTemplates(cluster)
- ipolicy = cluster.get("ipolicy", None)
- if ipolicy:
- DowngradeIPolicy(ipolicy, "cluster")
+def DowngradeDisks(disks, owner):
+  """Strip disk parameters unknown to the downgrade target version.
+
+  @type disks: list of dict
+  @param disks: the disk dicts of one instance
+  @type owner: string
+  @param owner: name of the owning instance, used only in log messages
+
+  """
+  for disk in disks:
+    # Remove spindles to downgrade to 2.8
+    if "spindles" in disk:
+      logging.warning("Removing spindles (value=%s) from disk %s (%s) of"
+                      " instance %s",
+                      disk["spindles"], disk["iv_name"], disk["uuid"], owner)
+      del disk["spindles"]
def DowngradeInstances(config_data):
+  """Downgrade the data in the "instances" dict, in place.
+
+  @raise Error: if the configuration lacks the expected instance keys
+
+  """
  if "instances" not in config_data:
-    raise Error("Can't find the 'instances' key in the configuration!")
+    raise Error("Cannot find the 'instances' key in the configuration!")
+  for (iname, iobj) in config_data["instances"].items():
+    if "disks" not in iobj:
+      raise Error("Cannot find 'disks' key for instance %s" % iname)
+    DowngradeDisks(iobj["disks"], iname)
+
-  for _, iobj in config_data["instances"].items():
-    if "disks_active" in iobj:
-      del iobj["disks_active"]
+def DowngradeNodeIndices(config_data):
+  """Change the "nodes" dict back to be keyed by node name instead of UUID.
+
+  """
+  ChangeNodeIndices(config_data, "uuid", "name")
def DowngradeAll(config_data):
# it can be removed when updating to the next version.
config_data["version"] = constants.BuildVersion(DOWNGRADE_MAJOR,
DOWNGRADE_MINOR, 0)
- DowngradeCluster(config_data)
- DowngradeGroups(config_data)
DowngradeInstances(config_data)
+ DowngradeNodeIndices(config_data)
def main():