root / tools / cfgupgrade @ 8a5d326f

#!/usr/bin/python
#

# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Tool to upgrade the configuration file.

This code handles only the types supported by simplejson; for example,
a 'set' is represented as a 'list'.

"""


import os
import os.path
import sys
import optparse
import logging
import time
from cStringIO import StringIO

from ganeti import constants
from ganeti import serializer
from ganeti import utils
from ganeti import cli
from ganeti import bootstrap
from ganeti import config
from ganeti import netutils
from ganeti import pathutils

from ganeti.utils import version


options = None
args = None


#: Target major version we will upgrade to
TARGET_MAJOR = 2
#: Target minor version we will upgrade to
TARGET_MINOR = 12
#: Target major version for downgrade
DOWNGRADE_MAJOR = 2
#: Target minor version for downgrade
DOWNGRADE_MINOR = 11

# map of legacy device types
# (mapping differing old LD_* constants to new DT_* constants)
DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
# (mapping differing new DT_* constants to old LD_* constants)
DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())


class Error(Exception):
  """Generic exception"""
  pass


def SetupLogging():
  """Configures the logging module.

  """
  formatter = logging.Formatter("%(asctime)s: %(message)s")

  stderr_handler = logging.StreamHandler()
  stderr_handler.setFormatter(formatter)
  if options.debug:
    stderr_handler.setLevel(logging.NOTSET)
  elif options.verbose:
    stderr_handler.setLevel(logging.INFO)
  else:
    stderr_handler.setLevel(logging.WARNING)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)
  root_logger.addHandler(stderr_handler)


def CheckHostname(path):
  """Ensures hostname matches ssconf value.

  @param path: Path to ssconf file

  """
  ssconf_master_node = utils.ReadOneLineFile(path)
  hostname = netutils.GetHostname().name

  if ssconf_master_node == hostname:
    return True

  logging.warning("Warning: ssconf says master node is '%s', but this"
                  " machine's name is '%s'; this tool must be run on"
                  " the master node", ssconf_master_node, hostname)
  return False


def _FillIPolicySpecs(default_ipolicy, ipolicy):
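  """Fill missing instance policy spec parameters from the defaults.

  For every key in the policy's first "minmax" entry, parameters that are
  missing are copied from C{default_ipolicy}.

  """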
  if "minmax" in ipolicy:
    for (key, spec) in ipolicy["minmax"][0].items():
      for (par, val) in default_ipolicy["minmax"][0][key].items():
        if par not in spec:
          spec[par] = val


def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
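  """Convert a legacy instance policy to the current format.

  Top-level "min" and "max" keys are folded into a single-element "minmax"
  list, group policies drop their "std" values, and missing spec parameters
  are filled in from C{default_ipolicy}.

  """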
  minmax_keys = ["min", "max"]
  if any((k in ipolicy) for k in minmax_keys):
    minmax = {}
    for key in minmax_keys:
      if key in ipolicy:
        if ipolicy[key]:
          minmax[key] = ipolicy[key]
        del ipolicy[key]
    if minmax:
      ipolicy["minmax"] = [minmax]
  if isgroup and "std" in ipolicy:
    del ipolicy["std"]
  _FillIPolicySpecs(default_ipolicy, ipolicy)


def UpgradeNetworks(config_data):
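  """Ensure the configuration contains a (possibly empty) "networks" dict."""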
  networks = config_data.get("networks", None)
  if not networks:
    config_data["networks"] = {}


def UpgradeCluster(config_data):
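  """Upgrade the cluster object.

  Upgrades the cluster-level instance policy and ensures that the
  C{default_iallocator_params}, C{candidate_certs} and
  C{instance_communication_network} fields are present.

  """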
  cluster = config_data.get("cluster", None)
  if cluster is None:
    raise Error("Cannot find cluster")
  ipolicy = cluster.setdefault("ipolicy", None)
  if ipolicy:
    UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
  ial_params = cluster.get("default_iallocator_params", None)
  if not ial_params:
    cluster["default_iallocator_params"] = {}
  if "candidate_certs" not in cluster:
    cluster["candidate_certs"] = {}
  cluster["instance_communication_network"] = \
    cluster.get("instance_communication_network", "")


def UpgradeGroups(config_data):
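  """Upgrade the node groups.

  Ensures every group has a "networks" dict and upgrades per-group instance
  policies against the cluster-level policy.

  """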
  cl_ipolicy = config_data["cluster"].get("ipolicy")
  for group in config_data["nodegroups"].values():
    networks = group.get("networks", None)
    if not networks:
      group["networks"] = {}
    ipolicy = group.get("ipolicy", None)
    if ipolicy:
      if cl_ipolicy is None:
        raise Error("A group defines an instance policy but there is no"
                    " instance policy at cluster level")
      UpgradeIPolicy(ipolicy, cl_ipolicy, True)


def GetExclusiveStorageValue(config_data):
  """Return a conservative value of the exclusive_storage flag.

  Return C{True} if the cluster or at least one nodegroup has the flag set.

  """
  ret = False
  cluster = config_data["cluster"]
  ndparams = cluster.get("ndparams")
  if ndparams is not None and ndparams.get("exclusive_storage"):
    ret = True
  for group in config_data["nodegroups"].values():
    ndparams = group.get("ndparams")
    if ndparams is not None and ndparams.get("exclusive_storage"):
      ret = True
  return ret


def RemovePhysicalId(disk):
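  """Recursively remove the obsolete "physical_id" attribute from a disk
  and its children."""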
  if "children" in disk:
    for d in disk["children"]:
      RemovePhysicalId(d)
  if "physical_id" in disk:
    del disk["physical_id"]


def ChangeDiskDevType(disk, dev_type_map):
  """Replaces the disk's dev_type attributes according to the given map.

  This can be used both for upgrading and for downgrading the disks.
  """
  if disk["dev_type"] in dev_type_map:
    disk["dev_type"] = dev_type_map[disk["dev_type"]]
  if "children" in disk:
    for child in disk["children"]:
      ChangeDiskDevType(child, dev_type_map)


def UpgradeDiskDevType(disk):
  """Upgrades the disks' device type."""
  ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)


def UpgradeInstances(config_data):
  """Upgrades the instances' configuration."""

  network2uuid = dict((n["name"], n["uuid"])
                      for n in config_data["networks"].values())
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")

  missing_spindles = False
  for instance, iobj in config_data["instances"].items():
    for nic in iobj["nics"]:
      name = nic.get("network", None)
      if name:
        uuid = network2uuid.get(name, None)
        if uuid:
          print("NIC with network name %s found."
                " Substituting with uuid %s." % (name, uuid))
          nic["network"] = uuid

    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    disks = iobj["disks"]
    for idx, dobj in enumerate(disks):
      RemovePhysicalId(dobj)

      expected = "disk/%s" % idx
      current = dobj.get("iv_name", "")
      if current != expected:
        logging.warning("Updating iv_name for instance %s/disk %s"
                        " from '%s' to '%s'",
                        instance, idx, current, expected)
        dobj["iv_name"] = expected

      if "dev_type" in dobj:
        UpgradeDiskDevType(dobj)

      if "spindles" not in dobj:
        missing_spindles = True

  if GetExclusiveStorageValue(config_data) and missing_spindles:
    # We cannot be sure that the instances that are missing spindles have
    # exclusive storage enabled (the check would be more complicated), so we
    # give a noncommittal message
    logging.warning("Some instance disks may need their spindles parameter"
                    " updated; you can check by running"
                    " 'gnt-cluster verify', and fix any problem with"
                    " 'gnt-cluster repair-disk-sizes'")


def UpgradeRapiUsers():
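  """Move the pre-2.4 RAPI users file to its current location.

  The old file is renamed (unless a file already exists at the new
  location) and a compatibility symlink is created at the old path.

  """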
  if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
      not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
    if os.path.exists(options.RAPI_USERS_FILE):
      raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                  " already exists at %s" %
                  (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
    logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
                       mkdir=True, mkdir_mode=0750)

  # Create a symlink for RAPI users file
  if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
           os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
      os.path.isfile(options.RAPI_USERS_FILE)):
    logging.info("Creating symlink from %s to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)


def UpgradeWatcher():
  # Remove old watcher state file if it exists
  if os.path.exists(options.WATCHER_STATEFILE):
    logging.info("Removing watcher state file %s", options.WATCHER_STATEFILE)
    if not options.dry_run:
      utils.RemoveFile(options.WATCHER_STATEFILE)


def UpgradeFileStoragePaths(config_data):
  # Write file storage paths
  if not os.path.exists(options.FILE_STORAGE_PATHS_FILE):
    cluster = config_data["cluster"]
    file_storage_dir = cluster.get("file_storage_dir")
    shared_file_storage_dir = cluster.get("shared_file_storage_dir")
    del cluster

    logging.info("Ganeti 2.7 and later only allow whitelisted directories"
                 " for file storage; writing existing configuration values"
                 " into '%s'",
                 options.FILE_STORAGE_PATHS_FILE)

    if file_storage_dir:
      logging.info("File storage directory: %s", file_storage_dir)
    if shared_file_storage_dir:
      logging.info("Shared file storage directory: %s",
                   shared_file_storage_dir)

    buf = StringIO()
    buf.write("# List automatically generated from configuration by\n")
    buf.write("# cfgupgrade at %s\n" % time.asctime())
    if file_storage_dir:
      buf.write("%s\n" % file_storage_dir)
    if shared_file_storage_dir:
      buf.write("%s\n" % shared_file_storage_dir)
    utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
                    data=buf.getvalue(),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)


def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
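  """Return the new key of a node, given its old key.

  If the old key is not found, the node is assumed to be already converted
  and the key is returned unchanged.

  """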
  if old_key not in nodes_by_old_key:
    logging.warning("Can't find node '%s' in configuration, assuming that it's"
                    " already up-to-date", old_key)
    return old_key
  return nodes_by_old_key[old_key][new_key_field]


def ChangeNodeIndices(config_data, old_key_field, new_key_field):
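  """Re-key the nodes dict and all node references.

  Nodes are re-indexed by C{new_key_field}; the master node, instance
  primary nodes and DRBD disk logical_ids are updated accordingly.

  """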
  def ChangeDiskNodeIndices(disk):
    # Note: 'drbd8' is a legacy device type from pre 2.9 and needs to be
    # considered when up/downgrading from/to any versions touching 2.9 on the
    # way.
    drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD
    if disk["dev_type"] in drbd_disk_types:
      for i in range(0, 2):
        disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
                                                disk["logical_id"][i],
                                                new_key_field)
    if "children" in disk:
      for child in disk["children"]:
        ChangeDiskNodeIndices(child)

  nodes_by_old_key = {}
  nodes_by_new_key = {}
  for (_, node) in config_data["nodes"].items():
    nodes_by_old_key[node[old_key_field]] = node
    nodes_by_new_key[node[new_key_field]] = node

  config_data["nodes"] = nodes_by_new_key

  cluster = config_data["cluster"]
  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           cluster["master_node"],
                                           new_key_field)

  for inst in config_data["instances"].values():
    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           inst["primary_node"],
                                           new_key_field)
    for disk in inst["disks"]:
      ChangeDiskNodeIndices(disk)


def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
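  """Re-key the instances dict by C{new_key_field}."""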
  insts_by_old_key = {}
  insts_by_new_key = {}
  for (_, inst) in config_data["instances"].items():
    insts_by_old_key[inst[old_key_field]] = inst
    insts_by_new_key[inst[new_key_field]] = inst

  config_data["instances"] = insts_by_new_key


def UpgradeNodeIndices(config_data):
  ChangeNodeIndices(config_data, "name", "uuid")


def UpgradeInstanceIndices(config_data):
  ChangeInstanceIndices(config_data, "name", "uuid")


def UpgradeAll(config_data):
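  """Set the target configuration version and run all upgrade steps."""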
  config_data["version"] = version.BuildVersion(TARGET_MAJOR, TARGET_MINOR, 0)
  UpgradeRapiUsers()
  UpgradeWatcher()
  UpgradeFileStoragePaths(config_data)
  UpgradeNetworks(config_data)
  UpgradeCluster(config_data)
  UpgradeGroups(config_data)
  UpgradeInstances(config_data)
  UpgradeNodeIndices(config_data)
  UpgradeInstanceIndices(config_data)


# DOWNGRADE ------------------------------------------------------------


def DowngradeCluster(config_data):
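  """Remove the C{osparams_private_cluster} and
  C{instance_communication_network} fields, which are unknown to the
  downgrade target version."""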
  cluster = config_data.get("cluster", None)
  if not cluster:
    raise Error("Cannot find the 'cluster' key in the configuration")

  if "osparams_private_cluster" in cluster:
    del cluster["osparams_private_cluster"]

  if "instance_communication_network" in cluster:
    del cluster["instance_communication_network"]


def DowngradeInstances(config_data):
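  """Remove the C{osparams_private} field from all instances, as it is
  unknown to the downgrade target version."""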
  instances = config_data.get("instances", None)
  if instances is None:
    raise Error("Cannot find the 'instances' key in the configuration")

  for (_, iobj) in instances.items():
    if "osparams_private" in iobj:
      del iobj["osparams_private"]


def DowngradeAll(config_data):
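  """Set the downgrade target version and run all downgrade steps."""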
  # Any code specific to a particular version should be labeled that way, so
  # it can be removed when updating to the next version.
  config_data["version"] = version.BuildVersion(DOWNGRADE_MAJOR,
                                                DOWNGRADE_MINOR, 0)
  DowngradeCluster(config_data)
  DowngradeInstances(config_data)


def main():
  """Main program.

  """
  global options, args # pylint: disable=W0603

  # Option parsing
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  (options, args) = parser.parse_args()

  # We need to keep filenames locally because they might be renamed between
  # versions.
  options.data_dir = os.path.abspath(options.data_dir)
  options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
  options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
  options.CLIENT_PEM_PATH = options.data_dir + "/client.pem"
  options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
  options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
  options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
  options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
  options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
  options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
  options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
  options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
  options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
  options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
  options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"

  SetupLogging()

  # Option checking
  if args:
    raise Error("No arguments expected")
  if options.downgrade and not options.no_verify:
    options.no_verify = True

  # Check master name
  if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
    logging.error("Aborting due to hostname mismatch")
    sys.exit(constants.EXIT_FAILURE)

  if not options.force:
    if options.downgrade:
      usertext = ("The configuration is going to be DOWNGRADED to"
                  " version %s.%s. Some configuration data may be removed if"
                  " it does not fit in the old format. Please make sure you"
                  " have read the upgrade notes (available in the UPGRADE"
                  " file and included in other documentation formats) to"
                  " understand what data is affected. Continue with"
                  " *DOWNGRADING* the configuration?" %
                  (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
    else:
      usertext = ("Please make sure you have read the upgrade notes for"
                  " Ganeti %s (available in the UPGRADE file and included"
                  " in other documentation formats). Continue with upgrading"
                  " configuration?" % constants.RELEASE_VERSION)
    if not cli.AskUser(usertext):
      sys.exit(constants.EXIT_FAILURE)

  # Check whether it's a Ganeti configuration directory
  if not (os.path.isfile(options.CONFIG_DATA_PATH) and
          os.path.isfile(options.SERVER_PEM_PATH) and
          os.path.isfile(options.KNOWN_HOSTS_PATH)):
    raise Error(("%s does not seem to be a Ganeti configuration"
                 " directory") % options.data_dir)

  if not os.path.isdir(options.conf_dir):
    raise Error("Not a directory: %s" % options.conf_dir)

  config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))

  try:
    config_version = config_data["version"]
  except KeyError:
    raise Error("Unable to determine configuration version")

  (config_major, config_minor, config_revision) = \
    version.SplitVersion(config_version)

  logging.info("Found configuration version %s (%d.%d.%d)",
               config_version, config_major, config_minor, config_revision)

  if "config_version" in config_data["cluster"]:
    raise Error("Inconsistent configuration: found config_version in"
                " configuration file")

  # Downgrade to the previous stable version
  if options.downgrade:
    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
            (config_major == DOWNGRADE_MAJOR and
             config_minor == DOWNGRADE_MINOR)):
      raise Error("Downgrade supported only from the latest version (%s.%s),"
                  " found %s (%s.%s.%s) instead" %
                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                   config_minor, config_revision))
    DowngradeAll(config_data)

  # Upgrade from 2.{0..11} to 2.12
  elif config_major == 2 and config_minor in range(0, 12):
    if config_revision != 0:
      logging.warning("Config revision is %s, not 0", config_revision)
    UpgradeAll(config_data)

  elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
    logging.info("No changes necessary")

  else:
    raise Error("Configuration version %d.%d.%d not supported by this tool" %
                (config_major, config_minor, config_revision))

  try:
    logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
    utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
                    data=serializer.DumpJson(config_data),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)

    if not options.dry_run:
      bootstrap.GenerateClusterCrypto(
        False, False, False, False, False,
        nodecert_file=options.SERVER_PEM_PATH,
        rapicert_file=options.RAPI_CERT_FILE,
        spicecert_file=options.SPICE_CERT_FILE,
        spicecacert_file=options.SPICE_CACERT_FILE,
        hmackey_file=options.CONFD_HMAC_KEY,
        cds_file=options.CDS_FILE)

  except Exception:
    logging.critical("Writing configuration failed. It is probably in an"
                     " inconsistent state and needs manual intervention.")
    raise

  # test loading the config file
  all_ok = True
  if not (options.dry_run or options.no_verify):
    logging.info("Testing the new config file...")
    cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
                              accept_foreign=options.ignore_hostname,
                              offline=True)
    # if we reached this, it's all fine
    vrfy = cfg.VerifyConfig()
    if vrfy:
      logging.error("Errors after conversion:")
      for item in vrfy:
        logging.error(" - %s", item)
      all_ok = False
    else:
      logging.info("File loaded successfully after upgrading")
    del cfg

  if options.downgrade:
    action = "downgraded"
    out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
  else:
    action = "upgraded"
    out_ver = constants.RELEASE_VERSION
  if all_ok:
    cli.ToStderr("Configuration successfully %s to version %s.",
                 action, out_ver)
  else:
    cli.ToStderr("Configuration %s to version %s, but there are errors."
                 "\nPlease review the file.", action, out_ver)


if __name__ == "__main__":
  main()