Prepare version numbers for 2.10 release cycle
diff --git a/tools/cfgupgrade b/tools/cfgupgrade
index a2bdc26..1135103 100755
@@ -44,6 +44,8 @@ from ganeti import config
 from ganeti import netutils
 from ganeti import pathutils
 
+from ganeti.utils import version
+
 
 options = None
 args = None
@@ -52,11 +54,17 @@ args = None
 #: Target major version we will upgrade to
 TARGET_MAJOR = 2
 #: Target minor version we will upgrade to
-TARGET_MINOR = 7
+TARGET_MINOR = 10
 #: Target major version for downgrade
 DOWNGRADE_MAJOR = 2
 #: Target minor version for downgrade
-DOWNGRADE_MINOR = 7
+DOWNGRADE_MINOR = 9
+
+# Map of legacy device types
+# (old LD_* constants that differ from the new DT_* constants)
+DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
+# (the reverse mapping: new DT_* constants back to the old LD_* constants)
+DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())
 
 
 class Error(Exception):
@@ -155,12 +163,58 @@ def UpgradeGroups(config_data):
       UpgradeIPolicy(ipolicy, cl_ipolicy, True)
 
 
+def GetExclusiveStorageValue(config_data):
+  """Return a conservative value of the exclusive_storage flag.
+
+  Return C{True} if the cluster or at least one nodegroup has the flag set.
+
+  """
+  ret = False
+  cluster = config_data["cluster"]
+  ndparams = cluster.get("ndparams")
+  if ndparams is not None and ndparams.get("exclusive_storage"):
+    ret = True
+  for group in config_data["nodegroups"].values():
+    ndparams = group.get("ndparams")
+    if ndparams is not None and ndparams.get("exclusive_storage"):
+      ret = True
+  return ret
+
+
+def RemovePhysicalId(disk):
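+  """Recursively remove the 'physical_id' field from a disk and its children."""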
+  if "children" in disk:
+    for d in disk["children"]:
+      RemovePhysicalId(d)
+  if "physical_id" in disk:
+    del disk["physical_id"]
+
+
+def ChangeDiskDevType(disk, dev_type_map):
+  """Replaces disk's dev_type attributes according to the given map.
+
+  This can be used for both, up or downgrading the disks.
+  """
+  if disk["dev_type"] in dev_type_map:
+    disk["dev_type"] = dev_type_map[disk["dev_type"]]
+  if "children" in disk:
+    for child in disk["children"]:
+      ChangeDiskDevType(child, dev_type_map)
+
+
+def UpgradeDiskDevType(disk):
+  """Upgrades the disks' device type."""
+  ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)
+
+
 def UpgradeInstances(config_data):
+  """Upgrades the instances' configuration."""
+
   network2uuid = dict((n["name"], n["uuid"])
                       for n in config_data["networks"].values())
   if "instances" not in config_data:
     raise Error("Can't find the 'instances' key in the configuration!")
 
+  missing_spindles = False
   for instance, iobj in config_data["instances"].items():
     for nic in iobj["nics"]:
       name = nic.get("network", None)
@@ -175,6 +229,8 @@ def UpgradeInstances(config_data):
       raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
     disks = iobj["disks"]
     for idx, dobj in enumerate(disks):
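+      # The obsolete 'physical_id' field is not used anymore, drop it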
+      RemovePhysicalId(dobj)
+
       expected = "disk/%s" % idx
       current = dobj.get("iv_name", "")
       if current != expected:
@@ -183,6 +239,21 @@ def UpgradeInstances(config_data):
                         instance, idx, current, expected)
         dobj["iv_name"] = expected
 
+      if "dev_type" in dobj:
+        UpgradeDiskDevType(dobj)
+
+      if not "spindles" in dobj:
+        missing_spindles = True
+
+  if GetExclusiveStorageValue(config_data) and missing_spindles:
+    # We cannot be sure that the instances that are missing spindles have
+    # exclusive storage enabled (the check would be more complicated), so we
+    # give a noncommittal message
+    logging.warning("Some instance disks could be needing to update the"
+                    " spindles parameter; you can check by running"
+                    " 'gnt-cluster verify', and fix any problem with"
+                    " 'gnt-cluster repair-disk-sizes'")
+
 
 def UpgradeRapiUsers():
   if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
@@ -248,9 +319,70 @@ def UpgradeFileStoragePaths(config_data):
                     backup=True)
 
 
+def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
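+  """Return the new key of the node identified by the old key.
+
+  If the node cannot be found, the old key is returned unchanged, assuming
+  the reference is already up-to-date.
+
+  """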
+  if old_key not in nodes_by_old_key:
+    logging.warning("Can't find node '%s' in configuration, assuming that it's"
+                    " already up-to-date", old_key)
+    return old_key
+  return nodes_by_old_key[old_key][new_key_field]
+
+
+def ChangeNodeIndices(config_data, old_key_field, new_key_field):
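+  """Re-key the nodes dict by the new key field and update all references.
+
+  The cluster's master node, the instances' primary nodes and the nodes in
+  the DRBD disks' logical_ids are rewritten to use the new keys.
+
+  """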
+  def ChangeDiskNodeIndices(disk):
+    # Note: 'drbd8' is a legacy device type from before 2.9 and has to be
+    # taken into account when upgrading from, or downgrading to, any version
+    # that passes through 2.9.
+    drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD
+    if disk["dev_type"] in drbd_disk_types:
+      for i in range(0, 2):
+        disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
+                                                disk["logical_id"][i],
+                                                new_key_field)
+    if "children" in disk:
+      for child in disk["children"]:
+        ChangeDiskNodeIndices(child)
+
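+  # Build node look-up tables indexed by the old and the new key respectively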
+  nodes_by_old_key = {}
+  nodes_by_new_key = {}
+  for (_, node) in config_data["nodes"].items():
+    nodes_by_old_key[node[old_key_field]] = node
+    nodes_by_new_key[node[new_key_field]] = node
+
+  config_data["nodes"] = nodes_by_new_key
+
+  cluster = config_data["cluster"]
+  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
+                                           cluster["master_node"],
+                                           new_key_field)
+
+  for inst in config_data["instances"].values():
+    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
+                                           inst["primary_node"],
+                                           new_key_field)
+    for disk in inst["disks"]:
+      ChangeDiskNodeIndices(disk)
+
+
+def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
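+  """Re-key the instances dict by the given new key field."""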
+  insts_by_old_key = {}
+  insts_by_new_key = {}
+  for (_, inst) in config_data["instances"].items():
+    insts_by_old_key[inst[old_key_field]] = inst
+    insts_by_new_key[inst[new_key_field]] = inst
+
+  config_data["instances"] = insts_by_new_key
+
+
+def UpgradeNodeIndices(config_data):
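+  """Re-index the nodes by UUID instead of by name."""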
+  ChangeNodeIndices(config_data, "name", "uuid")
+
+
+def UpgradeInstanceIndices(config_data):
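+  """Re-index the instances by UUID instead of by name."""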
+  ChangeInstanceIndices(config_data, "name", "uuid")
+
+
 def UpgradeAll(config_data):
-  config_data["version"] = constants.BuildVersion(TARGET_MAJOR,
-                                                  TARGET_MINOR, 0)
+  config_data["version"] = version.BuildVersion(TARGET_MAJOR, TARGET_MINOR, 0)
   UpgradeRapiUsers()
   UpgradeWatcher()
   UpgradeFileStoragePaths(config_data)
@@ -258,30 +390,31 @@ def UpgradeAll(config_data):
   UpgradeCluster(config_data)
   UpgradeGroups(config_data)
   UpgradeInstances(config_data)
-
-
-def DowngradeDisks(disks, owner):
-  for disk in disks:
-    # Remove spindles to downgrade to 2.8
-    if "spindles" in disk:
-      logging.warning("Removing spindles (value=%s) from disk %s (%s) of"
-                      " instance %s",
-                      disk["spindles"], disk["iv_name"], disk["uuid"], owner)
-      del disk["spindles"]
+  UpgradeNodeIndices(config_data)
+  UpgradeInstanceIndices(config_data)
 
 
 def DowngradeInstances(config_data):
   if "instances" not in config_data:
     raise Error("Cannot find the 'instances' key in the configuration!")
   for (iname, iobj) in config_data["instances"].items():
-    if "disks" not in iobj:
-      raise Error("Cannot find 'disks' key for instance %s" % iname)
-    DowngradeDisks(iobj["disks"], iname)
+    DowngradeNicParamsVLAN(iobj["nics"], iname)
+
+
+def DowngradeNicParamsVLAN(nics, owner):
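+  """Remove the 'vlan' entry from the nicparams of the given NICs."""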
+  for nic in nics:
+    vlan = nic["nicparams"].get("vlan", None)
+    if vlan:
+      logging.warning("Instance with name %s found. Removing VLAN information"
+                      " %s.", owner, vlan)
+      del nic["nicparams"]["vlan"]
 
 
 def DowngradeAll(config_data):
   # Any code specific to a particular version should be labeled that way, so
   # it can be removed when updating to the next version.
+  config_data["version"] = version.BuildVersion(DOWNGRADE_MAJOR,
+                                                DOWNGRADE_MINOR, 0)
   DowngradeInstances(config_data)
 
 
@@ -383,7 +516,7 @@ def main():
     raise Error("Unable to determine configuration version")
 
   (config_major, config_minor, config_revision) = \
-    constants.SplitVersion(config_version)
+    version.SplitVersion(config_version)
 
   logging.info("Found configuration version %s (%d.%d.%d)",
                config_version, config_major, config_minor, config_revision)
@@ -394,15 +527,17 @@ def main():
 
   # Downgrade to the previous stable version
   if options.downgrade:
-    if config_major != TARGET_MAJOR or config_minor != TARGET_MINOR:
+    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
+            (config_major == DOWNGRADE_MAJOR and
+             config_minor == DOWNGRADE_MINOR)):
       raise Error("Downgrade supported only from the latest version (%s.%s),"
                   " found %s (%s.%s.%s) instead" %
                   (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                    config_minor, config_revision))
     DowngradeAll(config_data)
 
-  # Upgrade from 2.{0..7} to 2.7
-  elif config_major == 2 and config_minor in range(0, 8):
+  # Upgrade from 2.{0..9} to 2.10
+  elif config_major == 2 and config_minor in range(0, 10):
     if config_revision != 0:
       logging.warning("Config revision is %s, not 0", config_revision)
     UpgradeAll(config_data)