Statistics
| Branch: | Tag: | Revision:

root / tools / cfgupgrade @ d4b81bdd

History | View | Annotate | Download (21.8 kB)

1
#!/usr/bin/python
2
#
3

    
4
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Tool to upgrade the configuration file.
23

    
24
This code handles only the types supported by simplejson. As an
25
example, 'set' is a 'list'.
26

    
27
"""
28

    
29

    
30
import os
31
import os.path
32
import sys
33
import optparse
34
import logging
35
import time
36
from cStringIO import StringIO
37

    
38
from ganeti import constants
39
from ganeti import serializer
40
from ganeti import utils
41
from ganeti import cli
42
from ganeti import bootstrap
43
from ganeti import config
44
from ganeti import netutils
45
from ganeti import pathutils
46

    
47
from ganeti.utils import version
48

    
49

    
50
options = None
51
args = None
52

    
53

    
54
#: Target major version we will upgrade to
55
TARGET_MAJOR = 2
56
#: Target minor version we will upgrade to
57
TARGET_MINOR = 11
58
#: Target major version for downgrade
59
DOWNGRADE_MAJOR = 2
60
#: Target minor version for downgrade
61
DOWNGRADE_MINOR = 10
62

    
63
# map of legacy device types
64
# (mapping differing old LD_* constants to new DT_* constants)
65
DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
66
# (mapping differing new DT_* constants to old LD_* constants)
67
DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())
68

    
69

    
70
class Error(Exception):
  """Generic exception raised for any failure of this upgrade tool"""
  pass
73

    
74

    
75
def SetupLogging():
  """Configures the logging module.

  Installs a stderr handler on the root logger; the threshold is taken
  from the global 'options': --debug shows everything, --verbose shows
  INFO and above, otherwise only warnings and errors are printed.

  """
  handler = logging.StreamHandler()
  handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))

  if options.debug:
    level = logging.NOTSET
  elif options.verbose:
    level = logging.INFO
  else:
    level = logging.WARNING
  handler.setLevel(level)

  root = logging.getLogger("")
  root.setLevel(logging.NOTSET)
  root.addHandler(handler)
93

    
94

    
95
def CheckHostname(path):
  """Ensures hostname matches ssconf value.

  @param path: Path to ssconf file
  @return: True if this machine is the recorded master node, False otherwise

  """
  master_name = utils.ReadOneLineFile(path)
  local_name = netutils.GetHostname().name

  if master_name != local_name:
    logging.warning("Warning: ssconf says master node is '%s', but this"
                    " machine's name is '%s'; this tool must be run on"
                    " the master node", master_name, local_name)
    return False

  return True
111

    
112

    
113
def _FillIPolicySpecs(default_ipolicy, ipolicy):
  """Fill missing min/max spec parameters from the default policy.

  Modifies 'ipolicy' in place: every parameter absent from one of its
  "minmax" specs is copied from the corresponding default spec.

  """
  if "minmax" not in ipolicy:
    return
  default_specs = default_ipolicy["minmax"][0]
  for bound, spec in ipolicy["minmax"][0].items():
    for par, val in default_specs[bound].items():
      spec.setdefault(par, val)
119

    
120

    
121
def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
  """Convert a pre-"minmax" instance policy to the current layout.

  Moves the legacy top-level "min"/"max" specs into a one-element
  "minmax" list, drops the "std" spec for node groups and fills any
  missing spec parameters from 'default_ipolicy'.  'ipolicy' is
  modified in place.

  """
  minmax = {}
  for bound in ("min", "max"):
    if bound in ipolicy:
      spec = ipolicy.pop(bound)
      # An empty legacy spec is dropped rather than carried over
      if spec:
        minmax[bound] = spec
  if minmax:
    ipolicy["minmax"] = [minmax]
  if isgroup and "std" in ipolicy:
    del ipolicy["std"]
  _FillIPolicySpecs(default_ipolicy, ipolicy)
135

    
136

    
137
def UpgradeNetworks(config_data):
  """Make sure the configuration carries a "networks" dictionary.

  A missing or falsy entry (None, empty) is replaced by an empty dict.

  """
  if not config_data.get("networks"):
    config_data["networks"] = {}
141

    
142

    
143
def UpgradeCluster(config_data):
  """Upgrades the cluster-level part of the configuration.

  Upgrades the instance policy (when present), and makes sure the
  "default_iallocator_params" and "candidate_certs" entries exist.

  @raise Error: if the configuration has no "cluster" section

  """
  cluster = config_data.get("cluster", None)
  if cluster is None:
    raise Error("Cannot find cluster")
  # setdefault deliberately records an explicit None when no policy exists
  ipolicy = cluster.setdefault("ipolicy", None)
  if ipolicy:
    UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
  ial_params = cluster.get("default_iallocator_params", None)
  if not ial_params:
    cluster["default_iallocator_params"] = {}
  # fixed: use the idiomatic "not in" membership test
  if "candidate_certs" not in cluster:
    cluster["candidate_certs"] = {}
155

    
156

    
157
def UpgradeGroups(config_data):
  """Upgrades every node group in the configuration.

  Ensures each group has a "networks" dict and upgrades any group-level
  instance policy against the cluster-level one.

  @raise Error: if a group has an ipolicy but the cluster has none

  """
  cluster_policy = config_data["cluster"].get("ipolicy")
  for group in config_data["nodegroups"].values():
    if not group.get("networks"):
      group["networks"] = {}
    group_policy = group.get("ipolicy")
    if group_policy:
      if cluster_policy is None:
        raise Error("A group defines an instance policy but there is no"
                    " instance policy at cluster level")
      UpgradeIPolicy(group_policy, cluster_policy, True)
169

    
170

    
171
def GetExclusiveStorageValue(config_data):
  """Return a conservative value of the exclusive_storage flag.

  Return C{True} if the cluster or at least a nodegroup have the flag set.

  """
  def _FlagSet(obj):
    # ndparams may be missing entirely, or present without the flag
    params = obj.get("ndparams")
    return params is not None and bool(params.get("exclusive_storage"))

  if _FlagSet(config_data["cluster"]):
    return True
  return any(_FlagSet(group)
             for group in config_data["nodegroups"].values())
187

    
188

    
189
def RemovePhysicalId(disk):
  """Recursively drop the obsolete "physical_id" attribute from a disk.

  Children are processed first, then the disk itself; 'disk' is
  modified in place.

  """
  for child in disk.get("children", []):
    RemovePhysicalId(child)
  disk.pop("physical_id", None)
195

    
196

    
197
def ChangeDiskDevType(disk, dev_type_map):
  """Replaces disk's dev_type attributes according to the given map.

  This can be used for both, up or downgrading the disks.
  """
  current = disk["dev_type"]
  if current in dev_type_map:
    disk["dev_type"] = dev_type_map[current]
  for child in disk.get("children", []):
    ChangeDiskDevType(child, dev_type_map)
207

    
208

    
209
def UpgradeDiskDevType(disk):
  """Upgrades the disks' device type.

  Maps the legacy LD_* names ("lvm", "drbd8") to the new DT_* constants,
  recursing into child disks via L{ChangeDiskDevType}.

  """
  ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)
212

    
213

    
214
def UpgradeInstances(config_data):
  """Upgrades the instances' configuration.

  Substitutes NIC network names with their UUIDs, removes the legacy
  "physical_id" disk attribute, renumbers "iv_name", upgrades disk
  dev_types, and warns if the "spindles" parameter may need attention.

  @raise Error: if the configuration has no "instances" key, or an
    instance lacks a "disks" entry

  """

  network2uuid = dict((n["name"], n["uuid"])
                      for n in config_data["networks"].values())
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")

  missing_spindles = False
  for instance, iobj in config_data["instances"].items():
    for nic in iobj["nics"]:
      name = nic.get("network", None)
      if name:
        uuid = network2uuid.get(name, None)
        if uuid:
          print("NIC with network name %s found."
                " Substituting with uuid %s." % (name, uuid))
          nic["network"] = uuid

    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    disks = iobj["disks"]
    for idx, dobj in enumerate(disks):
      RemovePhysicalId(dobj)

      # iv_name must match the disk's index within the instance
      expected = "disk/%s" % idx
      current = dobj.get("iv_name", "")
      if current != expected:
        logging.warning("Updating iv_name for instance %s/disk %s"
                        " from '%s' to '%s'",
                        instance, idx, current, expected)
        dobj["iv_name"] = expected

      if "dev_type" in dobj:
        UpgradeDiskDevType(dobj)

      # fixed: idiomatic "not in" instead of "not ... in"
      if "spindles" not in dobj:
        missing_spindles = True

  if GetExclusiveStorageValue(config_data) and missing_spindles:
    # We cannot be sure that the instances that are missing spindles have
    # exclusive storage enabled (the check would be more complicated), so we
    # give a noncommittal message
    logging.warning("Some instance disks could be needing to update the"
                    " spindles parameter; you can check by running"
                    " 'gnt-cluster verify', and fix any problem with"
                    " 'gnt-cluster repair-disk-sizes'")
261

    
262

    
263
def UpgradeRapiUsers():
  """Moves the pre-2.4 RAPI users file to its new location.

  Renames the old file to the new path (honouring --dry-run) and then
  keeps a symlink at the old location for backwards compatibility.

  @raise Error: if both the pre-2.4 file and the new file exist

  """
  # A real (non-symlink) file at the old location means the rename has
  # not happened yet
  if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
      not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
    if os.path.exists(options.RAPI_USERS_FILE):
      raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                  " already exists at %s" %
                  (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
    logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
                       mkdir=True, mkdir_mode=0750)

  # Create a symlink for RAPI users file
  if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
           os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
      os.path.isfile(options.RAPI_USERS_FILE)):
    logging.info("Creating symlink from %s to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)
284

    
285

    
286
def UpgradeWatcher():
  """Removes the stale watcher state file, if present.

  Honours --dry-run: with it, the removal is only logged.

  """
  # Remove old watcher state file if it exists
  if os.path.exists(options.WATCHER_STATEFILE):
    logging.info("Removing watcher state file %s", options.WATCHER_STATEFILE)
    if not options.dry_run:
      utils.RemoveFile(options.WATCHER_STATEFILE)
292

    
293

    
294
def UpgradeFileStoragePaths(config_data):
  """Writes the configured file storage directories to the whitelist file.

  Only acts when the whitelist file does not exist yet; the write itself
  honours --dry-run via utils.WriteFile.

  @param config_data: configuration dictionary (read-only here)

  """
  # Write file storage paths
  if not os.path.exists(options.FILE_STORAGE_PATHS_FILE):
    cluster = config_data["cluster"]
    file_storage_dir = cluster.get("file_storage_dir")
    shared_file_storage_dir = cluster.get("shared_file_storage_dir")
    del cluster

    logging.info("Ganeti 2.7 and later only allow whitelisted directories"
                 " for file storage; writing existing configuration values"
                 " into '%s'",
                 options.FILE_STORAGE_PATHS_FILE)

    if file_storage_dir:
      logging.info("File storage directory: %s", file_storage_dir)
    if shared_file_storage_dir:
      logging.info("Shared file storage directory: %s",
                   shared_file_storage_dir)

    buf = StringIO()
    buf.write("# List automatically generated from configuration by\n")
    buf.write("# cfgupgrade at %s\n" % time.asctime())
    if file_storage_dir:
      buf.write("%s\n" % file_storage_dir)
    if shared_file_storage_dir:
      buf.write("%s\n" % shared_file_storage_dir)
    utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
                    data=buf.getvalue(),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)
325

    
326

    
327
def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
  """Translate an old node key into the value of the new key field.

  Unknown keys are returned unchanged (with a warning), on the
  assumption that they were already converted.

  """
  try:
    node = nodes_by_old_key[old_key]
  except KeyError:
    logging.warning("Can't find node '%s' in configuration, assuming that it's"
                    " already up-to-date", old_key)
    return old_key
  return node[new_key_field]
333

    
334

    
335
def ChangeNodeIndices(config_data, old_key_field, new_key_field):
  """Re-key the "nodes" dict and rewrite all node references.

  Re-indexes config_data["nodes"] by 'new_key_field' and translates the
  master node, instance primary nodes, and the node references inside
  DRBD disks' logical_id from the old key to the new one.

  """
  def ChangeDiskNodeIndices(disk):
    # Note: 'drbd8' is a legacy device type from pre 2.9 and needs to be
    # considered when up/downgrading from/to any versions touching 2.9 on the
    # way.
    drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD
    if disk["dev_type"] in drbd_disk_types:
      # the first two logical_id entries of a DRBD disk are its two nodes
      for i in range(0, 2):
        disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
                                                disk["logical_id"][i],
                                                new_key_field)
    if "children" in disk:
      for child in disk["children"]:
        ChangeDiskNodeIndices(child)

  nodes_by_old_key = {}
  nodes_by_new_key = {}
  for (_, node) in config_data["nodes"].items():
    nodes_by_old_key[node[old_key_field]] = node
    nodes_by_new_key[node[new_key_field]] = node

  config_data["nodes"] = nodes_by_new_key

  cluster = config_data["cluster"]
  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           cluster["master_node"],
                                           new_key_field)

  for inst in config_data["instances"].values():
    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           inst["primary_node"],
                                           new_key_field)
    for disk in inst["disks"]:
      ChangeDiskNodeIndices(disk)
369

    
370

    
371
def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
  """Re-key the "instances" dict by 'new_key_field'.

  @param old_key_field: unused; kept for signature symmetry with
    L{ChangeNodeIndices}

  """
  # The old-key map previously built here was never used (instances,
  # unlike nodes, have no cross-references to translate), so only the
  # re-keying remains.
  config_data["instances"] = dict(
    (inst[new_key_field], inst)
    for inst in config_data["instances"].values())
379

    
380

    
381
def UpgradeNodeIndices(config_data):
  """Re-keys nodes by UUID instead of name, rewriting all references."""
  ChangeNodeIndices(config_data, "name", "uuid")
383

    
384

    
385
def UpgradeInstanceIndices(config_data):
  """Re-keys instances by UUID instead of name."""
  ChangeInstanceIndices(config_data, "name", "uuid")
387

    
388

    
389
def UpgradeAll(config_data):
  """Runs all upgrade steps and stamps the target version.

  The order matters: networks must exist before instances reference
  them, and node re-indexing must come after all name-based lookups.

  """
  config_data["version"] = version.BuildVersion(TARGET_MAJOR, TARGET_MINOR, 0)
  UpgradeRapiUsers()
  UpgradeWatcher()
  UpgradeFileStoragePaths(config_data)
  UpgradeNetworks(config_data)
  UpgradeCluster(config_data)
  UpgradeGroups(config_data)
  UpgradeInstances(config_data)
  UpgradeNodeIndices(config_data)
  UpgradeInstanceIndices(config_data)
400

    
401

    
402
# DOWNGRADE ------------------------------------------------------------
403

    
404

    
405
def DowngradeCluster(config_data):
  """Downgrade cluster-level entries to the previous stable format.

  Downgrades the cluster ndparams and drops keys unknown to the
  downgrade target version.

  @raise Error: if the configuration has no "cluster" section

  """
  cluster = config_data.get("cluster", None)
  if not cluster:
    raise Error("Cannot find the 'cluster' key in the configuration!")
  DowngradeNdparams(cluster)
  # These entries do not exist in the downgrade target version
  for key in ("default_iallocator_params", "candidate_certs",
              "gluster", "gluster_storage_dir"):
    cluster.pop(key, None)
417

    
418

    
419
def DowngradeGroups(config_data):
  """Applies the ndparams downgrade to every node group."""
  for group in config_data["nodegroups"].values():
    DowngradeNdparams(group)
422

    
423

    
424
def DowngradeNdparams(group_or_cluster):
  """Strip the ssh_port ndparam, refusing non-default ports.

  @raise Error: if a non-standard SSH port (not 22) is configured, since
    the downgrade target cannot represent it

  """
  ssh_port = group_or_cluster["ndparams"].pop("ssh_port", None)
  if ssh_port not in (None, 22):
    raise Error(("The cluster or some node group has configured SSH port %d."
                 " Refusing to downgrade as it will most certainly fail."
                 ) % (ssh_port, ))
430

    
431

    
432
def DowngradeAll(config_data):
  """Runs all downgrade steps and stamps the downgrade target version."""
  # Any code specific to a particular version should be labeled that way, so
  # it can be removed when updating to the next version.
  config_data["version"] = version.BuildVersion(DOWNGRADE_MAJOR,
                                                DOWNGRADE_MINOR, 0)
  DowngradeCluster(config_data)
  DowngradeGroups(config_data)
439

    
440

    
441
def main():
  """Main program.

  Parses options, sanity-checks the data directory and hostname, asks
  for confirmation, runs the upgrade or downgrade, writes the result
  back (honouring --dry-run) and optionally verifies the new file.

  """
  global options, args # pylint: disable=W0603

  # Option parsing
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  (options, args) = parser.parse_args()

  # We need to keep filenames locally because they might be renamed between
  # versions.
  options.data_dir = os.path.abspath(options.data_dir)
  options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
  options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
  options.CLIENT_PEM_PATH = options.data_dir + "/client.pem"
  options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
  options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
  options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
  options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
  options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
  options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
  options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
  options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
  options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
  options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
  options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"

  SetupLogging()

  # Option checking
  if args:
    raise Error("No arguments expected")
  # A downgraded config cannot pass the current version's verification
  if options.downgrade and not options.no_verify:
    options.no_verify = True

  # Check master name
  if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
    logging.error("Aborting due to hostname mismatch")
    sys.exit(constants.EXIT_FAILURE)

  if not options.force:
    if options.downgrade:
      usertext = ("The configuration is going to be DOWNGRADED to version %s.%s"
                  " Some configuration data might be removed if they don't fit"
                  " in the old format. Please make sure you have read the"
                  " upgrade notes (available in the UPGRADE file and included"
                  " in other documentation formats) to understand what they"
                  " are. Continue with *DOWNGRADING* the configuration?" %
                  (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
    else:
      usertext = ("Please make sure you have read the upgrade notes for"
                  " Ganeti %s (available in the UPGRADE file and included"
                  " in other documentation formats). Continue with upgrading"
                  " configuration?" % constants.RELEASE_VERSION)
    if not cli.AskUser(usertext):
      sys.exit(constants.EXIT_FAILURE)

  # Check whether it's a Ganeti configuration directory
  if not (os.path.isfile(options.CONFIG_DATA_PATH) and
          os.path.isfile(options.SERVER_PEM_PATH) and
          os.path.isfile(options.KNOWN_HOSTS_PATH)):
    raise Error(("%s does not seem to be a Ganeti configuration"
                 " directory") % options.data_dir)

  if not os.path.isdir(options.conf_dir):
    raise Error("Not a directory: %s" % options.conf_dir)

  config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))

  try:
    config_version = config_data["version"]
  except KeyError:
    raise Error("Unable to determine configuration version")

  (config_major, config_minor, config_revision) = \
    version.SplitVersion(config_version)

  logging.info("Found configuration version %s (%d.%d.%d)",
               config_version, config_major, config_minor, config_revision)

  if "config_version" in config_data["cluster"]:
    raise Error("Inconsistent configuration: found config_version in"
                " configuration file")

  # Downgrade to the previous stable version
  if options.downgrade:
    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
            (config_major == DOWNGRADE_MAJOR and
             config_minor == DOWNGRADE_MINOR)):
      raise Error("Downgrade supported only from the latest version (%s.%s),"
                  " found %s (%s.%s.%s) instead" %
                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                   config_minor, config_revision))
    DowngradeAll(config_data)

  # Upgrade from 2.{0..10} to 2.11
  elif config_major == 2 and config_minor in range(0, 11):
    if config_revision != 0:
      logging.warning("Config revision is %s, not 0", config_revision)
    UpgradeAll(config_data)

  elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
    logging.info("No changes necessary")

  else:
    raise Error("Configuration version %d.%d.%d not supported by this tool" %
                (config_major, config_minor, config_revision))

  try:
    logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
    utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
                    data=serializer.DumpJson(config_data),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)

    if not options.dry_run:
      # Regenerate only the certificates/keys that are missing
      bootstrap.GenerateClusterCrypto(
        False, False, False, False, False,
        nodecert_file=options.SERVER_PEM_PATH,
        rapicert_file=options.RAPI_CERT_FILE,
        spicecert_file=options.SPICE_CERT_FILE,
        spicecacert_file=options.SPICE_CACERT_FILE,
        hmackey_file=options.CONFD_HMAC_KEY,
        cds_file=options.CDS_FILE)

  except Exception:
    logging.critical("Writing configuration failed. It is probably in an"
                     " inconsistent state and needs manual intervention.")
    raise

  # test loading the config file
  all_ok = True
  if not (options.dry_run or options.no_verify):
    logging.info("Testing the new config file...")
    cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
                              accept_foreign=options.ignore_hostname,
                              offline=True)
    # if we reached this, it's all fine
    vrfy = cfg.VerifyConfig()
    if vrfy:
      logging.error("Errors after conversion:")
      for item in vrfy:
        logging.error(" - %s", item)
      all_ok = False
    else:
      logging.info("File loaded successfully after upgrading")
    del cfg

  if options.downgrade:
    action = "downgraded"
    out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
  else:
    action = "upgraded"
    out_ver = constants.RELEASE_VERSION
  if all_ok:
    cli.ToStderr("Configuration successfully %s to version %s.",
                 action, out_ver)
  else:
    cli.ToStderr("Configuration %s to version %s, but there are errors."
                 "\nPlease review the file.", action, out_ver)
626

    
627

    
628
if __name__ == "__main__":
629
  main()