Statistics
| Branch: | Tag: | Revision:

root / tools / cfgupgrade @ bbc6620d

History | View | Annotate | Download (21.2 kB)

1
#!/usr/bin/python
2
#
3

    
4
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Tool to upgrade the configuration file.
23

    
24
This code handles only the types supported by simplejson. As an
25
example, 'set' is a 'list'.
26

    
27
"""
28

    
29

    
30
import os
31
import os.path
32
import sys
33
import optparse
34
import logging
35
import time
36
from cStringIO import StringIO
37

    
38
from ganeti import constants
39
from ganeti import serializer
40
from ganeti import utils
41
from ganeti import cli
42
from ganeti import bootstrap
43
from ganeti import config
44
from ganeti import netutils
45
from ganeti import pathutils
46

    
47
from ganeti.utils import version
48

    
49

    
50
options = None
51
args = None
52

    
53

    
54
#: Target major version we will upgrade to
55
TARGET_MAJOR = 2
56
#: Target minor version we will upgrade to
57
TARGET_MINOR = 11
58
#: Target major version for downgrade
59
DOWNGRADE_MAJOR = 2
60
#: Target minor version for downgrade
61
DOWNGRADE_MINOR = 10
62

    
63
# map of legacy device types
64
# (mapping differing old LD_* constants to new DT_* constants)
65
DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
66
# (mapping differing new DT_* constants to old LD_* constants)
67
DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())
68

    
69

    
70
class Error(Exception):
  """Generic exception raised by this configuration upgrade tool."""
73

    
74

    
75
def SetupLogging():
  """Configure the root logger to report to stderr.

  The verbosity of the stderr handler is derived from the global
  command-line options (--debug > --verbose > default warnings-only).

  """
  if options.debug:
    level = logging.NOTSET
  elif options.verbose:
    level = logging.INFO
  else:
    level = logging.WARNING

  handler = logging.StreamHandler()
  handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
  handler.setLevel(level)

  root = logging.getLogger("")
  root.setLevel(logging.NOTSET)
  root.addHandler(handler)
93

    
94

    
95
def CheckHostname(path):
  """Ensures hostname matches ssconf value.

  @param path: Path to ssconf file
  @return: True if this machine is the recorded master node, else False

  """
  ssconf_master_node = utils.ReadOneLineFile(path)
  hostname = netutils.GetHostname().name

  if ssconf_master_node != hostname:
    logging.warning("Warning: ssconf says master node is '%s', but this"
                    " machine's name is '%s'; this tool must be run on"
                    " the master node", ssconf_master_node, hostname)
    return False
  return True
111

    
112

    
113
def _FillIPolicySpecs(default_ipolicy, ipolicy):
114
  if "minmax" in ipolicy:
115
    for (key, spec) in ipolicy["minmax"][0].items():
116
      for (par, val) in default_ipolicy["minmax"][0][key].items():
117
        if par not in spec:
118
          spec[par] = val
119

    
120

    
121
def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
  """Convert a pre-2.6 instance policy to the list-of-minmax format.

  Top-level 'min'/'max' keys are folded into a single-element 'minmax'
  list; groups lose their 'std' values; missing spec parameters are then
  filled from the defaults. The policy is modified in place.

  """
  minmax = {}
  for key in ("min", "max"):
    if key in ipolicy:
      value = ipolicy.pop(key)
      if value:
        minmax[key] = value
  if minmax:
    ipolicy["minmax"] = [minmax]
  if isgroup and "std" in ipolicy:
    del ipolicy["std"]
  _FillIPolicySpecs(default_ipolicy, ipolicy)
135

    
136

    
137
def UpgradeNetworks(config_data):
  """Ensure the configuration has a (possibly empty) 'networks' mapping."""
  if not config_data.get("networks"):
    config_data["networks"] = {}
141

    
142

    
143
def UpgradeCluster(config_data):
  """Upgrade the cluster section of the configuration.

  Makes sure the 'ipolicy' key exists and converts a non-empty policy to
  the current format.

  @raise Error: if no cluster section is present

  """
  cluster = config_data.get("cluster", None)
  if cluster is None:
    raise Error("Cannot find cluster")
  # Create the key even when there is no policy to convert
  ipolicy = cluster.setdefault("ipolicy", None)
  if ipolicy:
    UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
150

    
151

    
152
def UpgradeGroups(config_data):
  """Upgrade every node group: networks key plus instance policy.

  @raise Error: if a group has an ipolicy but the cluster does not

  """
  cl_ipolicy = config_data["cluster"].get("ipolicy")
  for group in config_data["nodegroups"].values():
    if not group.get("networks", None):
      group["networks"] = {}
    ipolicy = group.get("ipolicy", None)
    if not ipolicy:
      continue
    if cl_ipolicy is None:
      raise Error("A group defines an instance policy but there is no"
                  " instance policy at cluster level")
    UpgradeIPolicy(ipolicy, cl_ipolicy, True)
164

    
165

    
166
def GetExclusiveStorageValue(config_data):
  """Return a conservative value of the exclusive_storage flag.

  Return C{True} if the cluster or at least a nodegroup have the flag set.

  """
  def _HasFlag(ndparams):
    return ndparams is not None and bool(ndparams.get("exclusive_storage"))

  if _HasFlag(config_data["cluster"].get("ndparams")):
    return True
  return any(_HasFlag(group.get("ndparams"))
             for group in config_data["nodegroups"].values())
182

    
183

    
184
def RemovePhysicalId(disk):
  """Recursively drop the legacy 'physical_id' attribute from a disk."""
  for child in disk.get("children", []):
    RemovePhysicalId(child)
  disk.pop("physical_id", None)
190

    
191

    
192
def ChangeDiskDevType(disk, dev_type_map):
  """Rewrite a disk's dev_type (and its children's) via the given map.

  Device types not present in the map are left untouched, so the same
  function works for upgrades and downgrades.

  """
  dtype = disk["dev_type"]
  if dtype in dev_type_map:
    disk["dev_type"] = dev_type_map[dtype]
  for child in disk.get("children", []):
    ChangeDiskDevType(child, dev_type_map)
202

    
203

    
204
def UpgradeDiskDevType(disk):
  """Convert a disk's legacy LD_* device type to the DT_* equivalent."""
  ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)
207

    
208

    
209
def UpgradeInstances(config_data):
  """Upgrades the instances' configuration.

  Replaces network names in NICs with their UUIDs, strips the legacy
  'physical_id' disk attribute, repairs 'iv_name', upgrades legacy disk
  device types and warns when the 'spindles' parameter may need fixing.

  @raise Error: if the 'instances' key is missing or an instance has no
      'disks' entry

  """
  network2uuid = dict((n["name"], n["uuid"])
                      for n in config_data["networks"].values())
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")

  missing_spindles = False
  for instance, iobj in config_data["instances"].items():
    # NICs may still refer to networks by name; substitute the UUID
    for nic in iobj["nics"]:
      name = nic.get("network", None)
      if name:
        uuid = network2uuid.get(name, None)
        if uuid:
          print("NIC with network name %s found."
                " Substituting with uuid %s." % (name, uuid))
          nic["network"] = uuid

    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    disks = iobj["disks"]
    for idx, dobj in enumerate(disks):
      RemovePhysicalId(dobj)

      # iv_name must match the disk's position within the instance
      expected = "disk/%s" % idx
      current = dobj.get("iv_name", "")
      if current != expected:
        logging.warning("Updating iv_name for instance %s/disk %s"
                        " from '%s' to '%s'",
                        instance, idx, current, expected)
        dobj["iv_name"] = expected

      if "dev_type" in dobj:
        UpgradeDiskDevType(dobj)

      if "spindles" not in dobj:
        missing_spindles = True

  if GetExclusiveStorageValue(config_data) and missing_spindles:
    # We cannot be sure that the instances that are missing spindles have
    # exclusive storage enabled (the check would be more complicated), so we
    # give a noncommittal message
    logging.warning("Some instance disks could be needing to update the"
                    " spindles parameter; you can check by running"
                    " 'gnt-cluster verify', and fix any problem with"
                    " 'gnt-cluster repair-disk-sizes'")
256

    
257

    
258
def UpgradeRapiUsers():
  """Rename the pre-2.4 RAPI users file and keep a compatibility symlink.

  Paths come from the global 'options' object; all filesystem changes
  honour --dry-run.

  @raise Error: if both the old and the new users file already exist

  """
  # A real (non-symlink) file at the old location means an unconverted setup
  if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
      not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
    if os.path.exists(options.RAPI_USERS_FILE):
      raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                  " already exists at %s" %
                  (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
    logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
                       mkdir=True, mkdir_mode=0750)

  # Create a symlink for RAPI users file
  if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
           os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
      os.path.isfile(options.RAPI_USERS_FILE)):
    logging.info("Creating symlink from %s to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)
279

    
280

    
281
def UpgradeWatcher():
  """Delete a leftover watcher state file, honouring --dry-run."""
  statefile = options.WATCHER_STATEFILE
  if os.path.exists(statefile):
    logging.info("Removing watcher state file %s", statefile)
    if not options.dry_run:
      utils.RemoveFile(statefile)
287

    
288

    
289
def UpgradeFileStoragePaths(config_data):
  """Write the configured file-storage directories to the whitelist file.

  Only runs when the whitelist file does not exist yet; honours --dry-run
  via utils.WriteFile.

  @param config_data: configuration dict whose cluster section provides
      'file_storage_dir' and 'shared_file_storage_dir'

  """
  # Write file storage paths
  if not os.path.exists(options.FILE_STORAGE_PATHS_FILE):
    cluster = config_data["cluster"]
    file_storage_dir = cluster.get("file_storage_dir")
    shared_file_storage_dir = cluster.get("shared_file_storage_dir")
    del cluster

    logging.info("Ganeti 2.7 and later only allow whitelisted directories"
                 " for file storage; writing existing configuration values"
                 " into '%s'",
                 options.FILE_STORAGE_PATHS_FILE)

    if file_storage_dir:
      logging.info("File storage directory: %s", file_storage_dir)
    if shared_file_storage_dir:
      logging.info("Shared file storage directory: %s",
                   shared_file_storage_dir)

    # Build the whitelist contents in memory, one directory per line
    buf = StringIO()
    buf.write("# List automatically generated from configuration by\n")
    buf.write("# cfgupgrade at %s\n" % time.asctime())
    if file_storage_dir:
      buf.write("%s\n" % file_storage_dir)
    if shared_file_storage_dir:
      buf.write("%s\n" % shared_file_storage_dir)
    utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
                    data=buf.getvalue(),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)
320

    
321

    
322
def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
  """Translate a node key, passing through keys already converted."""
  try:
    node = nodes_by_old_key[old_key]
  except KeyError:
    logging.warning("Can't find node '%s' in configuration, assuming that it's"
                    " already up-to-date", old_key)
    return old_key
  return node[new_key_field]
328

    
329

    
330
def ChangeNodeIndices(config_data, old_key_field, new_key_field):
  """Re-key the nodes dict and rewrite all node references in place.

  Updates the cluster master node, instance primary nodes and the node
  keys stored in DRBD disks' logical_id.

  """
  def _FixDisk(disk):
    # Note: 'drbd8' is a legacy device type from pre 2.9 and needs to be
    # considered when up/downgrading from/to any versions touching 2.9 on the
    # way.
    drbd_types = set(["drbd8"]) | constants.DTS_DRBD
    if disk["dev_type"] in drbd_types:
      # The first two logical_id entries of a DRBD disk are node keys
      for i in (0, 1):
        disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
                                                disk["logical_id"][i],
                                                new_key_field)
    for child in disk.get("children", []):
      _FixDisk(child)

  nodes_by_old_key = {}
  nodes_by_new_key = {}
  for node in config_data["nodes"].values():
    nodes_by_old_key[node[old_key_field]] = node
    nodes_by_new_key[node[new_key_field]] = node

  config_data["nodes"] = nodes_by_new_key

  cluster = config_data["cluster"]
  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           cluster["master_node"],
                                           new_key_field)

  for inst in config_data["instances"].values():
    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           inst["primary_node"],
                                           new_key_field)
    for disk in inst["disks"]:
      _FixDisk(disk)
364

    
365

    
366
def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
  """Re-key the 'instances' dict by another instance field.

  @param config_data: configuration dict, modified in place
  @param old_key_field: field the dict was keyed by; deliberately unused
      (the original mapping it built was never read), kept for interface
      symmetry with ChangeNodeIndices
  @param new_key_field: instance field to key the dict by

  """
  # pylint: disable=W0613
  insts_by_new_key = {}
  for inst in config_data["instances"].values():
    insts_by_new_key[inst[new_key_field]] = inst

  config_data["instances"] = insts_by_new_key
374

    
375

    
376
def UpgradeNodeIndices(config_data):
  """Switch the nodes dict from name-based to UUID-based keys."""
  ChangeNodeIndices(config_data, "name", "uuid")
378

    
379

    
380
def UpgradeInstanceIndices(config_data):
  """Switch the instances dict from name-based to UUID-based keys."""
  ChangeInstanceIndices(config_data, "name", "uuid")
382

    
383

    
384
def UpgradeAll(config_data):
  """Run all upgrade steps, bumping the version to TARGET_MAJOR.TARGET_MINOR.

  The order matters: UpgradeNetworks guarantees the 'networks' key that
  UpgradeInstances reads, and node indices are converted before instance
  indices.

  """
  config_data["version"] = version.BuildVersion(TARGET_MAJOR, TARGET_MINOR, 0)
  UpgradeRapiUsers()
  UpgradeWatcher()
  UpgradeFileStoragePaths(config_data)
  UpgradeNetworks(config_data)
  UpgradeCluster(config_data)
  UpgradeGroups(config_data)
  UpgradeInstances(config_data)
  UpgradeNodeIndices(config_data)
  UpgradeInstanceIndices(config_data)
395

    
396

    
397
# DOWNGRADE ------------------------------------------------------------
398

    
399

    
400
def DowngradeCluster(config_data):
  """Downgrade the cluster-level node parameters."""
  DowngradeNdparams(config_data["cluster"])
402

    
403

    
404
def DowngradeGroups(config_data):
  """Downgrade the node parameters of every node group."""
  for grp in config_data["nodegroups"].values():
    DowngradeNdparams(grp)
407

    
408

    
409
def DowngradeNdparams(group_or_cluster):
  """Remove the ssh_port node parameter, refusing non-default values.

  @param group_or_cluster: cluster or nodegroup configuration dict,
      modified in place
  @raise Error: if an SSH port other than 22 is configured

  """
  # Be robust against a missing ndparams section; there is then nothing
  # to downgrade
  ndparams = group_or_cluster.get("ndparams", None)
  if ndparams is None:
    return
  ssh_port = ndparams.pop("ssh_port", None)
  if (ssh_port is not None) and (ssh_port != 22):
    raise Error(("The cluster or some node group has configured SSH port %d."
                 " Refusing to downgrade as it will most certainly fail."
                 ) % (ssh_port, ))
415

    
416

    
417
def DowngradeAll(config_data):
  """Run all downgrade steps, setting the version to the previous stable.

  """
  # Any code specific to a particular version should be labeled that way, so
  # it can be removed when updating to the next version.
  config_data["version"] = version.BuildVersion(DOWNGRADE_MAJOR,
                                                DOWNGRADE_MINOR, 0)
  DowngradeCluster(config_data)
  DowngradeGroups(config_data)
424

    
425

    
426
def main():
  """Main program.

  Parses the command line, loads the configuration, upgrades or
  downgrades it, writes it back and (unless disabled) verifies the
  result.

  """
  global options, args # pylint: disable=W0603

  # Option parsing
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  (options, args) = parser.parse_args()

  # We need to keep filenames locally because they might be renamed between
  # versions.
  options.data_dir = os.path.abspath(options.data_dir)
  options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
  options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
  options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
  options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
  options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
  options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
  options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
  options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
  options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
  options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
  options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
  options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
  options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"

  SetupLogging()

  # Option checking
  if args:
    raise Error("No arguments expected")
  # --downgrade implies skipping the post-write verification step
  if options.downgrade and not options.no_verify:
    options.no_verify = True

  # Check master name
  if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
    logging.error("Aborting due to hostname mismatch")
    sys.exit(constants.EXIT_FAILURE)

  # Without --force, ask the user for confirmation before touching anything
  if not options.force:
    if options.downgrade:
      usertext = ("The configuration is going to be DOWNGRADED to version %s.%s"
                  " Some configuration data might be removed if they don't fit"
                  " in the old format. Please make sure you have read the"
                  " upgrade notes (available in the UPGRADE file and included"
                  " in other documentation formats) to understand what they"
                  " are. Continue with *DOWNGRADING* the configuration?" %
                  (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
    else:
      usertext = ("Please make sure you have read the upgrade notes for"
                  " Ganeti %s (available in the UPGRADE file and included"
                  " in other documentation formats). Continue with upgrading"
                  " configuration?" % constants.RELEASE_VERSION)
    if not cli.AskUser(usertext):
      sys.exit(constants.EXIT_FAILURE)

  # Check whether it's a Ganeti configuration directory
  if not (os.path.isfile(options.CONFIG_DATA_PATH) and
          os.path.isfile(options.SERVER_PEM_PATH) and
          os.path.isfile(options.KNOWN_HOSTS_PATH)):
    raise Error(("%s does not seem to be a Ganeti configuration"
                 " directory") % options.data_dir)

  if not os.path.isdir(options.conf_dir):
    raise Error("Not a directory: %s" % options.conf_dir)

  config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))

  try:
    config_version = config_data["version"]
  except KeyError:
    raise Error("Unable to determine configuration version")

  (config_major, config_minor, config_revision) = \
    version.SplitVersion(config_version)

  logging.info("Found configuration version %s (%d.%d.%d)",
               config_version, config_major, config_minor, config_revision)

  if "config_version" in config_data["cluster"]:
    raise Error("Inconsistent configuration: found config_version in"
                " configuration file")

  # Downgrade to the previous stable version
  if options.downgrade:
    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
            (config_major == DOWNGRADE_MAJOR and
             config_minor == DOWNGRADE_MINOR)):
      raise Error("Downgrade supported only from the latest version (%s.%s),"
                  " found %s (%s.%s.%s) instead" %
                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                   config_minor, config_revision))
    DowngradeAll(config_data)

  # Upgrade from 2.{0..10} to 2.11
  elif config_major == 2 and config_minor in range(0, 11):
    if config_revision != 0:
      logging.warning("Config revision is %s, not 0", config_revision)
    UpgradeAll(config_data)

  elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
    logging.info("No changes necessary")

  else:
    raise Error("Configuration version %d.%d.%d not supported by this tool" %
                (config_major, config_minor, config_revision))

  try:
    logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
    utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
                    data=serializer.DumpJson(config_data),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)

    if not options.dry_run:
      # NOTE(review): the five positional False flags presumably keep the
      # existing credentials instead of regenerating them -- confirm against
      # bootstrap.GenerateClusterCrypto
      bootstrap.GenerateClusterCrypto(
        False, False, False, False, False,
        nodecert_file=options.SERVER_PEM_PATH,
        rapicert_file=options.RAPI_CERT_FILE,
        spicecert_file=options.SPICE_CERT_FILE,
        spicecacert_file=options.SPICE_CACERT_FILE,
        hmackey_file=options.CONFD_HMAC_KEY,
        cds_file=options.CDS_FILE)

  except Exception:
    logging.critical("Writing configuration failed. It is probably in an"
                     " inconsistent state and needs manual intervention.")
    raise

  # test loading the config file
  all_ok = True
  if not (options.dry_run or options.no_verify):
    logging.info("Testing the new config file...")
    cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
                              accept_foreign=options.ignore_hostname,
                              offline=True)
    # if we reached this, it's all fine
    vrfy = cfg.VerifyConfig()
    if vrfy:
      logging.error("Errors after conversion:")
      for item in vrfy:
        logging.error(" - %s", item)
      all_ok = False
    else:
      logging.info("File loaded successfully after upgrading")
    del cfg

  if options.downgrade:
    action = "downgraded"
    out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
  else:
    action = "upgraded"
    out_ver = constants.RELEASE_VERSION
  if all_ok:
    cli.ToStderr("Configuration successfully %s to version %s.",
                 action, out_ver)
  else:
    cli.ToStderr("Configuration %s to version %s, but there are errors."
                 "\nPlease review the file.", action, out_ver)
610

    
611

    
612
# Script entry point
if __name__ == "__main__":
  main()