Statistics
| Branch: | Tag: | Revision:

root / tools / cfgupgrade @ 60cc531d

History | View | Annotate | Download (21.5 kB)

1
#!/usr/bin/python
2
#
3

    
4
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Tool to upgrade the configuration file.
23

    
24
This code handles only the types supported by simplejson. As an
25
example, 'set' is a 'list'.
26

    
27
"""
28

    
29

    
30
import os
31
import os.path
32
import sys
33
import optparse
34
import logging
35
import time
36
from cStringIO import StringIO
37

    
38
from ganeti import constants
39
from ganeti import serializer
40
from ganeti import utils
41
from ganeti import cli
42
from ganeti import bootstrap
43
from ganeti import config
44
from ganeti import netutils
45
from ganeti import pathutils
46

    
47
from ganeti.utils import version
48

    
49

    
50
options = None
51
args = None
52

    
53

    
54
#: Target major version we will upgrade to
55
TARGET_MAJOR = 2
56
#: Target minor version we will upgrade to
57
TARGET_MINOR = 11
58
#: Target major version for downgrade
59
DOWNGRADE_MAJOR = 2
60
#: Target minor version for downgrade
61
DOWNGRADE_MINOR = 10
62

    
63
# map of legacy device types
64
# (mapping differing old LD_* constants to new DT_* constants)
65
DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
66
# (mapping differing new DT_* constants to old LD_* constants)
67
DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())
68

    
69

    
70
class Error(Exception):
  """Generic exception raised by this tool."""
73

    
74

    
75
def SetupLogging():
  """Configures the logging module.

  Attaches a stderr handler to the root logger; verbosity is derived from
  the global C{options} (--debug > --verbose > default warnings-only).

  """
  handler = logging.StreamHandler()
  handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))

  # Pick the threshold for the stderr handler from the CLI flags.
  if options.debug:
    level = logging.NOTSET
  elif options.verbose:
    level = logging.INFO
  else:
    level = logging.WARNING
  handler.setLevel(level)

  # The root logger passes everything through; filtering happens per-handler.
  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)
  root_logger.addHandler(handler)
93

    
94

    
95
def CheckHostname(path):
  """Ensures hostname matches ssconf value.

  @param path: Path to ssconf file
  @return: C{True} if this machine is the master node, C{False} otherwise

  """
  ssconf_master_node = utils.ReadOneLineFile(path)
  hostname = netutils.GetHostname().name

  if ssconf_master_node != hostname:
    logging.warning("Warning: ssconf says master node is '%s', but this"
                    " machine's name is '%s'; this tool must be run on"
                    " the master node", ssconf_master_node, hostname)
    return False

  return True
111

    
112

    
113
def _FillIPolicySpecs(default_ipolicy, ipolicy):
114
  if "minmax" in ipolicy:
115
    for (key, spec) in ipolicy["minmax"][0].items():
116
      for (par, val) in default_ipolicy["minmax"][0][key].items():
117
        if par not in spec:
118
          spec[par] = val
119

    
120

    
121
def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
  """Convert legacy flat "min"/"max" ipolicy keys to the "minmax" list form.

  Group policies additionally drop the "std" spec (only valid at cluster
  level); missing spec parameters are then filled from the defaults.

  """
  minmax = {}
  for key in ("min", "max"):
    if key in ipolicy:
      # Only non-empty specs are carried over, but the legacy key is
      # always removed.
      if ipolicy[key]:
        minmax[key] = ipolicy[key]
      del ipolicy[key]
  if minmax:
    ipolicy["minmax"] = [minmax]
  if isgroup and "std" in ipolicy:
    del ipolicy["std"]
  _FillIPolicySpecs(default_ipolicy, ipolicy)
135

    
136

    
137
def UpgradeNetworks(config_data):
  """Ensure the top-level "networks" mapping exists in the configuration."""
  if not config_data.get("networks"):
    config_data["networks"] = {}
141

    
142

    
143
def UpgradeCluster(config_data):
  """Upgrade cluster-level configuration entries.

  Converts the cluster instance policy to the new format and makes sure
  "default_iallocator_params" exists.

  @raise Error: if no "cluster" key is present

  """
  cluster = config_data.get("cluster", None)
  if cluster is None:
    raise Error("Cannot find cluster")
  # setdefault also inserts an explicit None when the key was absent,
  # matching the historical behavior.
  ipolicy = cluster.setdefault("ipolicy", None)
  if ipolicy:
    UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
  if not cluster.get("default_iallocator_params"):
    cluster["default_iallocator_params"] = {}
153

    
154

    
155
def UpgradeGroups(config_data):
  """Upgrade per-nodegroup networks and instance policies.

  @raise Error: if a group has an ipolicy but the cluster has none

  """
  cl_ipolicy = config_data["cluster"].get("ipolicy")
  for group in config_data["nodegroups"].values():
    if not group.get("networks"):
      group["networks"] = {}
    ipolicy = group.get("ipolicy")
    if ipolicy:
      if cl_ipolicy is None:
        raise Error("A group defines an instance policy but there is no"
                    " instance policy at cluster level")
      UpgradeIPolicy(ipolicy, cl_ipolicy, True)
167

    
168

    
169
def GetExclusiveStorageValue(config_data):
  """Return a conservative value of the exclusive_storage flag.

  Return C{True} if the cluster or at least a nodegroup have the flag set.

  """
  def _Enabled(obj):
    # True when the object's ndparams enable exclusive storage.
    ndparams = obj.get("ndparams")
    return ndparams is not None and bool(ndparams.get("exclusive_storage"))

  if _Enabled(config_data["cluster"]):
    return True
  return any(_Enabled(g) for g in config_data["nodegroups"].values())
185

    
186

    
187
def RemovePhysicalId(disk):
  """Recursively drop the obsolete "physical_id" attribute from a disk."""
  for child in disk.get("children", []):
    RemovePhysicalId(child)
  disk.pop("physical_id", None)
193

    
194

    
195
def ChangeDiskDevType(disk, dev_type_map):
  """Replaces disk's dev_type attributes according to the given map.

  This can be used for both, up or downgrading the disks.
  """
  new_type = dev_type_map.get(disk["dev_type"])
  if new_type is not None:
    disk["dev_type"] = new_type
  for child in disk.get("children", []):
    ChangeDiskDevType(child, dev_type_map)
205

    
206

    
207
def UpgradeDiskDevType(disk):
  """Upgrades the disks' device type.

  Recursively maps legacy LD_* device types (e.g. "lvm", "drbd8") to the
  new DT_* constants via L{DEV_TYPE_OLD_NEW}.

  """
  ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)
210

    
211

    
212
def UpgradeInstances(config_data):
  """Upgrades the instances' configuration.

  Replaces NIC network names with UUIDs, strips obsolete disk physical
  ids, normalizes iv_name, upgrades disk device types and warns when
  spindle information may need regeneration.

  @raise Error: if the "instances" key is missing

  """
  network2uuid = dict((n["name"], n["uuid"])
                      for n in config_data["networks"].values())
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")

  missing_spindles = False
  for instance, iobj in config_data["instances"].items():
    # NICs may still reference networks by name; switch to UUIDs.
    for nic in iobj["nics"]:
      name = nic.get("network")
      if not name:
        continue
      uuid = network2uuid.get(name)
      if uuid:
        print("NIC with network name %s found."
              " Substituting with uuid %s." % (name, uuid))
        nic["network"] = uuid

    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)

    for idx, dobj in enumerate(iobj["disks"]):
      RemovePhysicalId(dobj)

      # iv_name must follow the canonical "disk/<index>" form.
      expected = "disk/%s" % idx
      current = dobj.get("iv_name", "")
      if current != expected:
        logging.warning("Updating iv_name for instance %s/disk %s"
                        " from '%s' to '%s'",
                        instance, idx, current, expected)
        dobj["iv_name"] = expected

      if "dev_type" in dobj:
        UpgradeDiskDevType(dobj)

      if "spindles" not in dobj:
        missing_spindles = True

  if GetExclusiveStorageValue(config_data) and missing_spindles:
    # We cannot be sure that the instances that are missing spindles have
    # exclusive storage enabled (the check would be more complicated), so we
    # give a noncommittal message
    logging.warning("Some instance disks could be needing to update the"
                    " spindles parameter; you can check by running"
                    " 'gnt-cluster verify', and fix any problem with"
                    " 'gnt-cluster repair-disk-sizes'")
259

    
260

    
261
def UpgradeRapiUsers():
  """Rename the pre-2.4 RAPI users file and keep a compatibility symlink.

  Moves C{options.RAPI_USERS_FILE_PRE24} to C{options.RAPI_USERS_FILE}
  (unless the destination already exists), then creates a symlink at the
  old location pointing to the new one so that older tools keep working.
  All filesystem changes honor C{options.dry_run}.

  @raise Error: if both the old-style and the new-style file exist

  """
  if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
      not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
    if os.path.exists(options.RAPI_USERS_FILE):
      raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                  " already exists at %s" %
                  (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
    logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      # 0o750 replaces the legacy 0750 spelling: same value, but valid on
      # both Python 2.6+ and Python 3 (the old form is a py3 syntax error).
      utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
                       mkdir=True, mkdir_mode=0o750)

  # Create a symlink for RAPI users file
  if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
           os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
      os.path.isfile(options.RAPI_USERS_FILE)):
    logging.info("Creating symlink from %s to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)
282

    
283

    
284
def UpgradeWatcher():
  """Remove the old watcher state file if it exists (honors dry-run)."""
  if not os.path.exists(options.WATCHER_STATEFILE):
    return
  logging.info("Removing watcher state file %s", options.WATCHER_STATEFILE)
  if not options.dry_run:
    utils.RemoveFile(options.WATCHER_STATEFILE)
290

    
291

    
292
def UpgradeFileStoragePaths(config_data):
  """Write the whitelisted file-storage paths file if it does not exist.

  Since Ganeti 2.7 file storage is restricted to whitelisted directories;
  this seeds the whitelist from the existing cluster configuration values.
  Honors C{options.dry_run} via L{utils.WriteFile}.

  @param config_data: the deserialized configuration dictionary

  """
  # Write file storage paths
  if not os.path.exists(options.FILE_STORAGE_PATHS_FILE):
    cluster = config_data["cluster"]
    file_storage_dir = cluster.get("file_storage_dir")
    shared_file_storage_dir = cluster.get("shared_file_storage_dir")
    del cluster

    logging.info("Ganeti 2.7 and later only allow whitelisted directories"
                 " for file storage; writing existing configuration values"
                 " into '%s'",
                 options.FILE_STORAGE_PATHS_FILE)

    if file_storage_dir:
      logging.info("File storage directory: %s", file_storage_dir)
    if shared_file_storage_dir:
      logging.info("Shared file storage directory: %s",
                   shared_file_storage_dir)

    buf = StringIO()
    buf.write("# List automatically generated from configuration by\n")
    buf.write("# cfgupgrade at %s\n" % time.asctime())
    if file_storage_dir:
      buf.write("%s\n" % file_storage_dir)
    if shared_file_storage_dir:
      buf.write("%s\n" % shared_file_storage_dir)
    # 0o600 replaces the legacy 0600 spelling: same value, but valid on
    # both Python 2.6+ and Python 3 (the old form is a py3 syntax error).
    utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
                    data=buf.getvalue(),
                    mode=0o600,
                    dry_run=options.dry_run,
                    backup=True)
323

    
324

    
325
def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
  """Translate an old node key to the new one, tolerating converted keys.

  Unknown keys are returned unchanged (with a warning), since they are
  assumed to be already converted.

  """
  try:
    node = nodes_by_old_key[old_key]
  except KeyError:
    logging.warning("Can't find node '%s' in configuration, assuming that it's"
                    " already up-to-date", old_key)
    return old_key
  return node[new_key_field]
331

    
332

    
333
def ChangeNodeIndices(config_data, old_key_field, new_key_field):
  """Re-key the nodes dict and rewrite every node reference accordingly.

  Updates the cluster master node, each instance's primary node and the
  node keys embedded in DRBD disk logical_ids.

  """
  # Note: 'drbd8' is a legacy device type from pre 2.9 and needs to be
  # considered when up/downgrading from/to any versions touching 2.9 on the
  # way.
  drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD

  def _FixDiskNodes(disk):
    # DRBD logical_ids carry the two node keys in their first two slots.
    if disk["dev_type"] in drbd_disk_types:
      for i in (0, 1):
        disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
                                                disk["logical_id"][i],
                                                new_key_field)
    for child in disk.get("children", []):
      _FixDiskNodes(child)

  nodes_by_old_key = {}
  nodes_by_new_key = {}
  for node in config_data["nodes"].values():
    nodes_by_old_key[node[old_key_field]] = node
    nodes_by_new_key[node[new_key_field]] = node

  config_data["nodes"] = nodes_by_new_key

  cluster = config_data["cluster"]
  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           cluster["master_node"],
                                           new_key_field)

  for inst in config_data["instances"].values():
    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           inst["primary_node"],
                                           new_key_field)
    for disk in inst["disks"]:
      _FixDiskNodes(disk)
367

    
368

    
369
def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
  """Re-key the instances dict from old_key_field to new_key_field."""
  by_old_key = {}
  by_new_key = {}
  for inst in config_data["instances"].values():
    by_old_key[inst[old_key_field]] = inst
    by_new_key[inst[new_key_field]] = inst

  config_data["instances"] = by_new_key
377

    
378

    
379
def UpgradeNodeIndices(config_data):
  """Switch node indexing from names to UUIDs (upgrade direction)."""
  ChangeNodeIndices(config_data, "name", "uuid")
381

    
382

    
383
def UpgradeInstanceIndices(config_data):
  """Switch instance indexing from names to UUIDs (upgrade direction)."""
  ChangeInstanceIndices(config_data, "name", "uuid")
385

    
386

    
387
def UpgradeAll(config_data):
  """Run all upgrade steps in order, stamping the target version.

  The order matters: networks must exist before instances reference them,
  and index conversions run last so earlier steps can use the old keys.

  """
  config_data["version"] = version.BuildVersion(TARGET_MAJOR, TARGET_MINOR, 0)
  UpgradeRapiUsers()
  UpgradeWatcher()
  UpgradeFileStoragePaths(config_data)
  UpgradeNetworks(config_data)
  UpgradeCluster(config_data)
  UpgradeGroups(config_data)
  UpgradeInstances(config_data)
  UpgradeNodeIndices(config_data)
  UpgradeInstanceIndices(config_data)
398

    
399

    
400
# DOWNGRADE ------------------------------------------------------------
401

    
402

    
403
def DowngradeCluster(config_data):
  """Downgrade cluster-level data to the previous stable format.

  @raise Error: if no "cluster" key is present

  """
  cluster = config_data.get("cluster", None)
  if not cluster:
    raise Error("Cannot find the 'cluster' key in the configuration!")
  DowngradeNdparams(cluster)
  # This key does not exist in the previous version.
  cluster.pop("default_iallocator_params", None)
410

    
411

    
412
def DowngradeGroups(config_data):
  """Apply the ndparams downgrade to every node group."""
  for group in config_data["nodegroups"].values():
    DowngradeNdparams(group)
415

    
416

    
417
def DowngradeNdparams(group_or_cluster):
  """Drop the ssh_port ndparam, refusing downgrade for non-default ports.

  @raise Error: if a non-standard (!= 22) SSH port is configured

  """
  ssh_port = group_or_cluster["ndparams"].pop("ssh_port", None)
  if ssh_port not in (None, 22):
    raise Error(("The cluster or some node group has configured SSH port %d."
                 " Refusing to downgrade as it will most certainly fail."
                 ) % (ssh_port, ))
423

    
424

    
425
def DowngradeAll(config_data):
  """Run all downgrade steps, stamping the downgrade target version."""
  # Any code specific to a particular version should be labeled that way, so
  # it can be removed when updating to the next version.
  config_data["version"] = version.BuildVersion(DOWNGRADE_MAJOR,
                                                DOWNGRADE_MINOR, 0)
  DowngradeCluster(config_data)
  DowngradeGroups(config_data)
432

    
433

    
434
def main():
  """Main program.

  Parses options, sanity-checks the configuration directory, then either
  upgrades or downgrades config.data, regenerates cluster crypto and
  finally re-verifies the written configuration.

  """
  global options, args # pylint: disable=W0603

  # Option parsing
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  (options, args) = parser.parse_args()

  # We need to keep filenames locally because they might be renamed between
  # versions.
  options.data_dir = os.path.abspath(options.data_dir)
  options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
  options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
  options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
  options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
  options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
  options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
  options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
  options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
  options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
  options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
  options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
  options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
  options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"

  SetupLogging()

  # Option checking
  if args:
    raise Error("No arguments expected")
  # Verification would fail against the downgraded format, so it is
  # implicitly disabled when downgrading.
  if options.downgrade and not options.no_verify:
    options.no_verify = True

  # Check master name
  if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
    logging.error("Aborting due to hostname mismatch")
    sys.exit(constants.EXIT_FAILURE)

  # Interactive confirmation unless --force was given.
  if not options.force:
    if options.downgrade:
      usertext = ("The configuration is going to be DOWNGRADED to version %s.%s"
                  " Some configuration data might be removed if they don't fit"
                  " in the old format. Please make sure you have read the"
                  " upgrade notes (available in the UPGRADE file and included"
                  " in other documentation formats) to understand what they"
                  " are. Continue with *DOWNGRADING* the configuration?" %
                  (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
    else:
      usertext = ("Please make sure you have read the upgrade notes for"
                  " Ganeti %s (available in the UPGRADE file and included"
                  " in other documentation formats). Continue with upgrading"
                  " configuration?" % constants.RELEASE_VERSION)
    if not cli.AskUser(usertext):
      sys.exit(constants.EXIT_FAILURE)

  # Check whether it's a Ganeti configuration directory
  if not (os.path.isfile(options.CONFIG_DATA_PATH) and
          os.path.isfile(options.SERVER_PEM_PATH) and
          os.path.isfile(options.KNOWN_HOSTS_PATH)):
    raise Error(("%s does not seem to be a Ganeti configuration"
                 " directory") % options.data_dir)

  if not os.path.isdir(options.conf_dir):
    raise Error("Not a directory: %s" % options.conf_dir)

  config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))

  try:
    config_version = config_data["version"]
  except KeyError:
    raise Error("Unable to determine configuration version")

  (config_major, config_minor, config_revision) = \
    version.SplitVersion(config_version)

  logging.info("Found configuration version %s (%d.%d.%d)",
               config_version, config_major, config_minor, config_revision)

  if "config_version" in config_data["cluster"]:
    raise Error("Inconsistent configuration: found config_version in"
                " configuration file")

  # Downgrade to the previous stable version
  if options.downgrade:
    # Downgrading is only supported from the current or the downgrade
    # target version itself (the latter being a no-op re-stamp).
    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
            (config_major == DOWNGRADE_MAJOR and
             config_minor == DOWNGRADE_MINOR)):
      raise Error("Downgrade supported only from the latest version (%s.%s),"
                  " found %s (%s.%s.%s) instead" %
                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                   config_minor, config_revision))
    DowngradeAll(config_data)

  # Upgrade from 2.{0..10} to 2.11
  elif config_major == 2 and config_minor in range(0, 11):
    if config_revision != 0:
      logging.warning("Config revision is %s, not 0", config_revision)
    UpgradeAll(config_data)

  elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
    logging.info("No changes necessary")

  else:
    raise Error("Configuration version %d.%d.%d not supported by this tool" %
                (config_major, config_minor, config_revision))

  try:
    logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
    utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
                    data=serializer.DumpJson(config_data),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)

    if not options.dry_run:
      # FIXME: fix node client certificate
      bootstrap.GenerateClusterCrypto(
        False, False, False, False, False, False,
        nodecert_file=options.SERVER_PEM_PATH,
        rapicert_file=options.RAPI_CERT_FILE,
        spicecert_file=options.SPICE_CERT_FILE,
        spicecacert_file=options.SPICE_CACERT_FILE,
        hmackey_file=options.CONFD_HMAC_KEY,
        cds_file=options.CDS_FILE)

  except Exception:
    logging.critical("Writing configuration failed. It is probably in an"
                     " inconsistent state and needs manual intervention.")
    raise

  # test loading the config file
  all_ok = True
  if not (options.dry_run or options.no_verify):
    logging.info("Testing the new config file...")
    cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
                              accept_foreign=options.ignore_hostname,
                              offline=True)
    # if we reached this, it's all fine
    vrfy = cfg.VerifyConfig()
    if vrfy:
      logging.error("Errors after conversion:")
      for item in vrfy:
        logging.error(" - %s", item)
      all_ok = False
    else:
      logging.info("File loaded successfully after upgrading")
    del cfg

  if options.downgrade:
    action = "downgraded"
    out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
  else:
    action = "upgraded"
    out_ver = constants.RELEASE_VERSION
  if all_ok:
    cli.ToStderr("Configuration successfully %s to version %s.",
                 action, out_ver)
  else:
    cli.ToStderr("Configuration %s to version %s, but there are errors."
                 "\nPlease review the file.", action, out_ver)
619

    
620

    
621
# Script entry point.
if __name__ == "__main__":
  main()