Merge branch 'stable-2.8' into 'master'
diff --git a/tools/cfgupgrade b/tools/cfgupgrade
index e7a2a15..e239bd1 100755
--- a/tools/cfgupgrade
+++ b/tools/cfgupgrade
@@ -1,7 +1,7 @@
 #!/usr/bin/python
 #
 
-# Copyright (C) 2007 Google Inc.
+# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -21,9 +21,8 @@
 
 """Tool to upgrade the configuration file.
 
-This code handles only the types supported by simplejson. As an example, "set"
-is a "list". Old Pickle based configurations files are converted to JSON during
-the process.
+This code handles only the types supported by simplejson. As an
+example, 'set' is a 'list'.
 
 """
 
@@ -31,187 +30,528 @@ the process.
 import os
 import os.path
 import sys
-import re
 import optparse
-import tempfile
-import simplejson
+import logging
+import time
+from cStringIO import StringIO
 
+from ganeti import constants
+from ganeti import serializer
 from ganeti import utils
-from ganeti.cli import AskUser, FORCE_OPT
+from ganeti import cli
+from ganeti import bootstrap
+from ganeti import config
+from ganeti import netutils
+from ganeti import pathutils
 
 
 options = None
 args = None
 
 
+#: Target major version we will upgrade to
+TARGET_MAJOR = 2
+#: Target minor version we will upgrade to
+TARGET_MINOR = 8
+#: Target major version for downgrade
+DOWNGRADE_MAJOR = 2
+#: Target minor version for downgrade
+DOWNGRADE_MINOR = 7
+
+
 class Error(Exception):
   """Generic exception"""
   pass
 
 
-# {{{ Support for old Pickle files
-class UpgradeDict(dict):
-  """Base class for internal config classes.
-
-  """
-  def __setstate__(self, state):
-    self.update(state)
-
-  def __getstate__(self):
-    return self.copy()
-
-
-def FindGlobal(module, name):
-  """Wraps Ganeti config classes to internal ones.
-
-  This function may only return types supported by simplejson.
-
-  """
-  if module == "ganeti.objects":
-    return UpgradeDict
-  elif module == "__builtin__" and name == "set":
-    return list
-
-  return getattr(sys.modules[module], name)
-
-
-def ReadPickleFile(f):
-  """Reads an old Pickle configuration.
-
-  """
-  import cPickle
-
-  loader = cPickle.Unpickler(f)
-  loader.find_global = FindGlobal
-  return loader.load()
-
-
-def IsPickleFile(f):
-  """Checks whether a file is using the Pickle format.
+def SetupLogging():
+  """Configures the logging module.
 
   """
-  magic = f.read(128)
-  try:
-    return not re.match('^\s*\{', magic)
-  finally:
-    f.seek(-len(magic), 1)
-# }}}
-
+  formatter = logging.Formatter("%(asctime)s: %(message)s")
+
+  stderr_handler = logging.StreamHandler()
+  stderr_handler.setFormatter(formatter)
+  if options.debug:
+    stderr_handler.setLevel(logging.NOTSET)
+  elif options.verbose:
+    stderr_handler.setLevel(logging.INFO)
+  else:
+    stderr_handler.setLevel(logging.WARNING)
 
-def ReadJsonFile(f):
-  """Reads a JSON file.
+  root_logger = logging.getLogger("")
+  root_logger.setLevel(logging.NOTSET)
+  root_logger.addHandler(stderr_handler)
 
-  """
-  return simplejson.load(f)
 
+def CheckHostname(path):
+  """Ensures hostname matches ssconf value.
 
-def ReadConfig(path):
-  """Reads configuration file.
+  @param path: Path to ssconf file
 
   """
-  f = open(path, 'r')
-  try:
-    if IsPickleFile(f):
-      return ReadPickleFile(f)
-    else:
-      return ReadJsonFile(f)
-  finally:
-    f.close()
-
-
-def WriteConfig(path, data):
-  """Writes the configuration file.
+  ssconf_master_node = utils.ReadOneLineFile(path)
+  hostname = netutils.GetHostname().name
+
+  if ssconf_master_node == hostname:
+    return True
+
+  logging.warning("Warning: ssconf says master node is '%s', but this"
+                  " machine's name is '%s'; this tool must be run on"
+                  " the master node", ssconf_master_node, hostname)
+  return False
+
+
+def _FillIPolicySpecs(default_ipolicy, ipolicy):
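+  # Fill parameters missing from the policy's min/max specs with the
+  # corresponding values from the default policy.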
+  if "minmax" in ipolicy:
+    for (key, spec) in ipolicy["minmax"][0].items():
+      for (par, val) in default_ipolicy["minmax"][0][key].items():
+        if par not in spec:
+          spec[par] = val
+
+
+def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
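+  # Convert the old flat layout, e.g. {"min": {...}, "max": {...}}, into
+  # the new {"minmax": [{"min": {...}, "max": {...}}]} layout; group
+  # policies additionally lose their "std" values.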
+  minmax_keys = ["min", "max"]
+  if any((k in ipolicy) for k in minmax_keys):
+    minmax = {}
+    for key in minmax_keys:
+      if key in ipolicy:
+        if ipolicy[key]:
+          minmax[key] = ipolicy[key]
+        del ipolicy[key]
+    if minmax:
+      ipolicy["minmax"] = [minmax]
+  if isgroup and "std" in ipolicy:
+    del ipolicy["std"]
+  _FillIPolicySpecs(default_ipolicy, ipolicy)
+
+
+def UpgradeNetworks(config_data):
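+  # Older configurations may lack the top-level "networks" dictionary;
+  # add an empty one if so.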
+  networks = config_data.get("networks", None)
+  if not networks:
+    config_data["networks"] = {}
+
+
+def UpgradeCluster(config_data):
+  cluster = config_data.get("cluster", None)
+  if cluster is None:
+    raise Error("Cannot find cluster")
+  ipolicy = cluster.setdefault("ipolicy", None)
+  if ipolicy:
+    UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
+
+
+def UpgradeGroups(config_data):
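+  # Group policies are partial: values missing from their specs are
+  # filled in from the cluster-level policy.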
+  cl_ipolicy = config_data["cluster"].get("ipolicy")
+  for group in config_data["nodegroups"].values():
+    networks = group.get("networks", None)
+    if not networks:
+      group["networks"] = {}
+    ipolicy = group.get("ipolicy", None)
+    if ipolicy:
+      if cl_ipolicy is None:
+        raise Error("A group defines an instance policy but there is no"
+                    " instance policy at cluster level")
+      UpgradeIPolicy(ipolicy, cl_ipolicy, True)
+
+
+def GetExclusiveStorageValue(config_data):
+  """Return a conservative value of the exclusive_storage flag.
+
+  Return C{True} if the cluster or at least one nodegroup has the flag set.
 
   """
-  if not options.dry_run:
-    utils.CreateBackup(path)
-
-  (fd, name) = tempfile.mkstemp(dir=os.path.dirname(path))
-  f = os.fdopen(fd, 'w')
-  try:
-    try:
-      simplejson.dump(data, f)
-      f.flush()
-      if options.dry_run:
-        os.unlink(name)
-      else:
-        os.rename(name, path)
-    except:
-      os.unlink(name)
-      raise
-  finally:
-    f.close()
-
-
-def UpdateFromVersion2To3(cfg):
-  """Updates the configuration from version 2 to 3.
+  ret = False
+  cluster = config_data["cluster"]
+  ndparams = cluster.get("ndparams")
+  if ndparams is not None and ndparams.get("exclusive_storage"):
+    ret = True
+  for group in config_data["nodegroups"].values():
+    ndparams = group.get("ndparams")
+    if ndparams is not None and ndparams.get("exclusive_storage"):
+      ret = True
+  return ret
+
+
+def UpgradeInstances(config_data):
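+  # Map network names to UUIDs so that NICs referring to networks by
+  # name can be converted below.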
+  network2uuid = dict((n["name"], n["uuid"])
+                      for n in config_data["networks"].values())
+  if "instances" not in config_data:
+    raise Error("Can't find the 'instances' key in the configuration!")
+
+  missing_spindles = False
+  for instance, iobj in config_data["instances"].items():
+    for nic in iobj["nics"]:
+      name = nic.get("network", None)
+      if name:
+        uuid = network2uuid.get(name, None)
+        if uuid:
+          print("NIC with network name %s found."
+                " Substituting with uuid %s." % (name, uuid))
+          nic["network"] = uuid
+
+    if "disks" not in iobj:
+      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
+    disks = iobj["disks"]
+    for idx, dobj in enumerate(disks):
+      expected = "disk/%s" % idx
+      current = dobj.get("iv_name", "")
+      if current != expected:
+        logging.warning("Updating iv_name for instance %s/disk %s"
+                        " from '%s' to '%s'",
+                        instance, idx, current, expected)
+        dobj["iv_name"] = expected
+      if not "spindles" in dobj:
+        missing_spindles = True
+
+  if GetExclusiveStorageValue(config_data) and missing_spindles:
+    # We cannot be sure that the instances that are missing spindles have
+    # exclusive storage enabled (the check would be more complicated), so we
+    # give a noncommittal message
+    logging.warning("Some instance disks could be needing to update the"
+                    " spindles parameter; you can check by running"
+                    " 'gnt-cluster verify', and fix any problem with"
+                    " 'gnt-cluster repair-disk-sizes'")
+
+
+def UpgradeRapiUsers():
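+  # Rename a RAPI users file still at its pre-2.4 location and leave a
+  # compatibility symlink at the old path.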
+  if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
+      not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
+    if os.path.exists(options.RAPI_USERS_FILE):
+      raise Error("Found pre-2.4 RAPI users file at %s, but another file"
+                  " already exists at %s" %
+                  (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
+    logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
+                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
+    if not options.dry_run:
+      utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
+                       mkdir=True, mkdir_mode=0750)
+
+  # Create a symlink for RAPI users file
+  if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
+           os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
+      os.path.isfile(options.RAPI_USERS_FILE)):
+    logging.info("Creating symlink from %s to %s",
+                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
+    if not options.dry_run:
+      os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)
+
+
+def UpgradeWatcher():
+  # Remove old watcher state file if it exists
+  if os.path.exists(options.WATCHER_STATEFILE):
+    logging.info("Removing watcher state file %s", options.WATCHER_STATEFILE)
+    if not options.dry_run:
+      utils.RemoveFile(options.WATCHER_STATEFILE)
+
+
+def UpgradeFileStoragePaths(config_data):
+  # Write file storage paths
+  if not os.path.exists(options.FILE_STORAGE_PATHS_FILE):
+    cluster = config_data["cluster"]
+    file_storage_dir = cluster.get("file_storage_dir")
+    shared_file_storage_dir = cluster.get("shared_file_storage_dir")
+    del cluster
+
+    logging.info("Ganeti 2.7 and later only allow whitelisted directories"
+                 " for file storage; writing existing configuration values"
+                 " into '%s'",
+                 options.FILE_STORAGE_PATHS_FILE)
+
+    if file_storage_dir:
+      logging.info("File storage directory: %s", file_storage_dir)
+    if shared_file_storage_dir:
+      logging.info("Shared file storage directory: %s",
+                   shared_file_storage_dir)
+
+    buf = StringIO()
+    buf.write("# List automatically generated from configuration by\n")
+    buf.write("# cfgupgrade at %s\n" % time.asctime())
+    if file_storage_dir:
+      buf.write("%s\n" % file_storage_dir)
+    if shared_file_storage_dir:
+      buf.write("%s\n" % shared_file_storage_dir)
+    utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
+                    data=buf.getvalue(),
+                    mode=0600,
+                    dry_run=options.dry_run,
+                    backup=True)
+
+
+def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
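+  # Translate a node reference via the lookup table; unknown references
+  # are assumed to be converted already and are returned unchanged.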
+  if old_key not in nodes_by_old_key:
+    logging.warning("Can't find node '%s' in configuration, assuming that it's"
+                    " already up-to-date", old_key)
+    return old_key
+  return nodes_by_old_key[old_key][new_key_field]
+
+
+def ChangeNodeIndices(config_data, old_key_field, new_key_field):
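+  # Re-key the "nodes" dictionary and update every node reference in the
+  # configuration (master_node, primary_node, DRBD logical_id entries)
+  # from old_key_field to new_key_field.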
+  def ChangeDiskNodeIndices(disk):
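+    # DRBD disks keep their two node references in logical_id[0] and
+    # logical_id[1]; child disks are converted recursively.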
+    if disk["dev_type"] in constants.LDS_DRBD:
+      for i in range(0, 2):
+        disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
+                                                disk["logical_id"][i],
+                                                new_key_field)
+    if "children" in disk:
+      for child in disk["children"]:
+        ChangeDiskNodeIndices(child)
+
+  nodes_by_old_key = {}
+  nodes_by_new_key = {}
+  for (_, node) in config_data["nodes"].items():
+    nodes_by_old_key[node[old_key_field]] = node
+    nodes_by_new_key[node[new_key_field]] = node
+
+  config_data["nodes"] = nodes_by_new_key
+
+  cluster = config_data["cluster"]
+  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
+                                           cluster["master_node"],
+                                           new_key_field)
+
+  for inst in config_data["instances"].values():
+    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
+                                           inst["primary_node"],
+                                           new_key_field)
+    for disk in inst["disks"]:
+      ChangeDiskNodeIndices(disk)
+
+
+def UpgradeNodeIndices(config_data):
+  ChangeNodeIndices(config_data, "name", "uuid")
+
+
+def UpgradeAll(config_data):
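+  # Stamp the target version first, then apply the individual upgrade
+  # steps in order.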
+  config_data["version"] = constants.BuildVersion(TARGET_MAJOR,
+                                                  TARGET_MINOR, 0)
+  UpgradeRapiUsers()
+  UpgradeWatcher()
+  UpgradeFileStoragePaths(config_data)
+  UpgradeNetworks(config_data)
+  UpgradeCluster(config_data)
+  UpgradeGroups(config_data)
+  UpgradeInstances(config_data)
+  UpgradeNodeIndices(config_data)
+
+
+def DowngradeDisks(disks, owner):
+  for disk in disks:
+    # Remove spindles to downgrade to 2.8
+    if "spindles" in disk:
+      logging.warning("Removing spindles (value=%s) from disk %s (%s) of"
+                      " instance %s",
+                      disk["spindles"], disk["iv_name"], disk["uuid"], owner)
+      del disk["spindles"]
+
+
+def DowngradeInstances(config_data):
+  if "instances" not in config_data:
+    raise Error("Cannot find the 'instances' key in the configuration!")
+  for (iname, iobj) in config_data["instances"].items():
+    if "disks" not in iobj:
+      raise Error("Cannot find 'disks' key for instance %s" % iname)
+    DowngradeDisks(iobj["disks"], iname)
+
+
+def DowngradeNodeIndices(config_data):
+  ChangeNodeIndices(config_data, "uuid", "name")
+
+
+def DowngradeAll(config_data):
+  # Any code specific to a particular version should be labeled that way, so
+  # it can be removed when updating to the next version.
+  config_data["version"] = constants.BuildVersion(DOWNGRADE_MAJOR,
+                                                  DOWNGRADE_MINOR, 0)
+  DowngradeInstances(config_data)
+  DowngradeNodeIndices(config_data)
+
+
+def main():
+  """Main program.
 
   """
-  if cfg['cluster']['config_version'] != 2:
-    return
-
-  # Add port pool
-  if 'tcpudp_port_pool' not in cfg['cluster']:
-    cfg['cluster']['tcpudp_port_pool'] = []
-
-  # Add bridge settings
-  if 'default_bridge' not in cfg['cluster']:
-    cfg['cluster']['default_bridge'] = 'xen-br0'
-  for inst in cfg['instances'].values():
-    for nic in inst['nics']:
-      if 'bridge' not in nic:
-        nic['bridge'] = None
-
-  cfg['cluster']['config_version'] = 3
-
-
-# Main program
-if __name__ == "__main__":
-  program = os.path.basename(sys.argv[0])
+  global options, args  # pylint: disable=W0603
 
   # Option parsing
-  parser = optparse.OptionParser()
-  parser.add_option('--dry-run', dest='dry_run',
+  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
+  parser.add_option("--dry-run", dest="dry_run",
                     action="store_true",
                     help="Try to do the conversion, but don't write"
                          " output file")
-  parser.add_option(FORCE_OPT)
-  parser.add_option('--verbose', dest='verbose',
-                    action="store_true",
-                    help="Verbose output")
+  parser.add_option(cli.FORCE_OPT)
+  parser.add_option(cli.DEBUG_OPT)
+  parser.add_option(cli.VERBOSE_OPT)
+  parser.add_option("--ignore-hostname", dest="ignore_hostname",
+                    action="store_true", default=False,
+                    help="Don't abort if hostname doesn't match")
+  parser.add_option("--path", help="Convert configuration in this"
+                    " directory instead of '%s'" % pathutils.DATA_DIR,
+                    default=pathutils.DATA_DIR, dest="data_dir")
+  parser.add_option("--confdir",
+                    help=("Use this directory instead of '%s'" %
+                          pathutils.CONF_DIR),
+                    default=pathutils.CONF_DIR, dest="conf_dir")
+  parser.add_option("--no-verify",
+                    help="Do not verify configuration after upgrade",
+                    action="store_true", dest="no_verify", default=False)
+  parser.add_option("--downgrade",
+                    help="Downgrade to the previous stable version",
+                    action="store_true", dest="downgrade", default=False)
   (options, args) = parser.parse_args()
 
+  # We need to keep filenames locally because they might be renamed between
+  # versions.
+  options.data_dir = os.path.abspath(options.data_dir)
+  options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
+  options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
+  options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
+  options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
+  options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
+  options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
+  options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
+  options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
+  options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
+  options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
+  options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
+  options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
+  options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"
+
+  SetupLogging()
+
   # Option checking
   if args:
-    cfg_file = args[0]
-  else:
-    raise Error("Configuration file not specified")
+    raise Error("No arguments expected")
+  if options.downgrade and not options.no_verify:
+    options.no_verify = True
+
+  # Check master name
+  if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
+    logging.error("Aborting due to hostname mismatch")
+    sys.exit(constants.EXIT_FAILURE)
 
   if not options.force:
-    usertext = ("%s MUST run on the master node. Is this the master"
-                " node?" % program)
-    if not AskUser(usertext):
-      sys.exit(1)
+    if options.downgrade:
+      usertext = ("The configuration is going to be DOWNGRADED to version %s.%s"
+                  " Some configuration data might be removed if they don't fit"
+                  " in the old format. Please make sure you have read the"
+                  " upgrade notes (available in the UPGRADE file and included"
+                  " in other documentation formats) to understand what they"
+                  " are. Continue with *DOWNGRADING* the configuration?" %
+                  (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
+    else:
+      usertext = ("Please make sure you have read the upgrade notes for"
+                  " Ganeti %s (available in the UPGRADE file and included"
+                  " in other documentation formats). Continue with upgrading"
+                  " configuration?" % constants.RELEASE_VERSION)
+    if not cli.AskUser(usertext):
+      sys.exit(constants.EXIT_FAILURE)
 
-  config = ReadConfig(cfg_file)
+  # Check whether it's a Ganeti configuration directory
+  if not (os.path.isfile(options.CONFIG_DATA_PATH) and
+          os.path.isfile(options.SERVER_PEM_PATH) and
+          os.path.isfile(options.KNOWN_HOSTS_PATH)):
+    raise Error(("%s does not seem to be a Ganeti configuration"
+                 " directory") % options.data_dir)
 
-  if options.verbose:
-    import pprint
-    print "Before upgrade:"
-    pprint.pprint(config)
-    print
+  if not os.path.isdir(options.conf_dir):
+    raise Error("Not a directory: %s" % options.conf_dir)
+
+  config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))
+
+  try:
+    config_version = config_data["version"]
+  except KeyError:
+    raise Error("Unable to determine configuration version")
+
+  (config_major, config_minor, config_revision) = \
+    constants.SplitVersion(config_version)
+
+  logging.info("Found configuration version %s (%d.%d.%d)",
+               config_version, config_major, config_minor, config_revision)
+
+  if "config_version" in config_data["cluster"]:
+    raise Error("Inconsistent configuration: found config_version in"
+                " configuration file")
+
+  # Downgrade to the previous stable version
+  if options.downgrade:
+    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
+            (config_major == DOWNGRADE_MAJOR and
+             config_minor == DOWNGRADE_MINOR)):
+      raise Error("Downgrade supported only from the latest version (%s.%s),"
+                  " found %s (%s.%s.%s) instead" %
+                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
+                   config_minor, config_revision))
+    DowngradeAll(config_data)
+
+  # Upgrade from 2.{0..7} to 2.8
+  elif config_major == 2 and config_minor in range(0, 8):
+    if config_revision != 0:
+      logging.warning("Config revision is %s, not 0", config_revision)
+    UpgradeAll(config_data)
+
+  elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
+    logging.info("No changes necessary")
 
-  UpdateFromVersion2To3(config)
+  else:
+    raise Error("Configuration version %d.%d.%d not supported by this tool" %
+                (config_major, config_minor, config_revision))
 
-  if options.verbose:
-    print "After upgrade:"
-    pprint.pprint(config)
-    print
+  try:
+    logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
+    utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
+                    data=serializer.DumpJson(config_data),
+                    mode=0600,
+                    dry_run=options.dry_run,
+                    backup=True)
+
+    if not options.dry_run:
+      bootstrap.GenerateClusterCrypto(
+        False, False, False, False, False,
+        nodecert_file=options.SERVER_PEM_PATH,
+        rapicert_file=options.RAPI_CERT_FILE,
+        spicecert_file=options.SPICE_CERT_FILE,
+        spicecacert_file=options.SPICE_CACERT_FILE,
+        hmackey_file=options.CONFD_HMAC_KEY,
+        cds_file=options.CDS_FILE)
+
+  except Exception:
+    logging.critical("Writing configuration failed. It is probably in an"
+                     " inconsistent state and needs manual intervention.")
+    raise
+
+  # test loading the config file
+  all_ok = True
+  if not (options.dry_run or options.no_verify):
+    logging.info("Testing the new config file...")
+    cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
+                              accept_foreign=options.ignore_hostname,
+                              offline=True)
+    # if we reached this, it's all fine
+    vrfy = cfg.VerifyConfig()
+    if vrfy:
+      logging.error("Errors after conversion:")
+      for item in vrfy:
+        logging.error(" - %s", item)
+      all_ok = False
+    else:
+      logging.info("File loaded successfully after upgrading")
+    del cfg
 
-  WriteConfig(cfg_file, config)
+  if options.downgrade:
+    action = "downgraded"
+    out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
+  else:
+    action = "upgraded"
+    out_ver = constants.RELEASE_VERSION
+  if all_ok:
+    cli.ToStderr("Configuration successfully %s to version %s.",
+                 action, out_ver)
+  else:
+    cli.ToStderr("Configuration %s to version %s, but there are errors."
+                 "\nPlease review the file.", action, out_ver)
 
-  print "The configuration file has been updated successfully. Please run"
-  print "  gnt-cluster copyfile %s" % cfg_file
-  print "now."
 
-# vim: set foldmethod=marker :
+if __name__ == "__main__":
+  main()