Statistics
| Branch: | Tag: | Revision:

root / tools / cfgupgrade @ 1d9f9df7

History | View | Annotate | Download (22 kB)

1
#!/usr/bin/python
2
#
3

    
4
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Tool to upgrade the configuration file.
23

    
24
This code handles only the types supported by simplejson. As an
25
example, 'set' is a 'list'.
26

    
27
"""
28

    
29

    
30
import os
31
import os.path
32
import sys
33
import optparse
34
import logging
35
import time
36
from cStringIO import StringIO
37

    
38
from ganeti import constants
39
from ganeti import serializer
40
from ganeti import utils
41
from ganeti import cli
42
from ganeti import bootstrap
43
from ganeti import config
44
from ganeti import netutils
45
from ganeti import pathutils
46

    
47

    
48
options = None
49
args = None
50

    
51

    
52
#: Target major version we will upgrade to
53
TARGET_MAJOR = 2
54
#: Target minor version we will upgrade to
55
TARGET_MINOR = 9
56
#: Target major version for downgrade
57
DOWNGRADE_MAJOR = 2
58
#: Target minor version for downgrade
59
DOWNGRADE_MINOR = 8
60

    
61
# map of legacy device types
62
# (mapping differing old LD_* constants to new DT_* constants)
63
DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
64
# (mapping differing new DT_* constants to old LD_* constants)
65
DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())
66

    
67

    
68
class Error(Exception):
  """Generic exception raised by the configuration upgrade tool."""
71

    
72

    
73
def SetupLogging():
  """Configures the logging module.

  Installs a stderr handler on the root logger; its threshold is derived
  from the global 'options' (--debug shows everything, --verbose shows
  INFO and up, otherwise only warnings and errors are printed).

  """
  handler = logging.StreamHandler()
  handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))

  if options.debug:
    level = logging.NOTSET
  elif options.verbose:
    level = logging.INFO
  else:
    level = logging.WARNING
  handler.setLevel(level)

  root = logging.getLogger("")
  root.setLevel(logging.NOTSET)
  root.addHandler(handler)
91

    
92

    
93
def CheckHostname(path):
  """Ensures hostname matches ssconf value.

  @param path: Path to ssconf file

  """
  master_name = utils.ReadOneLineFile(path)
  local_name = netutils.GetHostname().name

  if master_name != local_name:
    # Mismatch: this tool only makes sense on the master node
    logging.warning("Warning: ssconf says master node is '%s', but this"
                    " machine's name is '%s'; this tool must be run on"
                    " the master node", master_name, local_name)
    return False
  return True
109

    
110

    
111
def _FillIPolicySpecs(default_ipolicy, ipolicy):
112
  if "minmax" in ipolicy:
113
    for (key, spec) in ipolicy["minmax"][0].items():
114
      for (par, val) in default_ipolicy["minmax"][0][key].items():
115
        if par not in spec:
116
          spec[par] = val
117

    
118

    
119
def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
  """Convert pre-2.9 flat "min"/"max" ipolicy keys to the "minmax" list.

  Group-level policies additionally lose their "std" entry; unset spec
  parameters are then filled in from C{default_ipolicy}.

  """
  collected = {}
  for bound in ("min", "max"):
    if bound not in ipolicy:
      continue
    if ipolicy[bound]:
      collected[bound] = ipolicy[bound]
    del ipolicy[bound]
  if collected:
    ipolicy["minmax"] = [collected]
  if isgroup:
    # "std" only makes sense at cluster level
    ipolicy.pop("std", None)
  _FillIPolicySpecs(default_ipolicy, ipolicy)
133

    
134

    
135
def UpgradeNetworks(config_data):
  """Ensure the configuration contains a (possibly empty) networks dict."""
  if not config_data.get("networks"):
    config_data["networks"] = {}
139

    
140

    
141
def UpgradeCluster(config_data):
  """Upgrade the cluster section, in particular its instance policy."""
  cluster = config_data.get("cluster")
  if cluster is None:
    raise Error("Cannot find cluster")
  # Make sure the key exists even when no policy is configured
  ipolicy = cluster.setdefault("ipolicy", None)
  if ipolicy:
    UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
148

    
149

    
150
def UpgradeGroups(config_data):
  """Upgrade every node group: networks dict and instance policy."""
  cl_ipolicy = config_data["cluster"].get("ipolicy")
  for group in config_data["nodegroups"].values():
    if not group.get("networks"):
      group["networks"] = {}
    ipolicy = group.get("ipolicy")
    if not ipolicy:
      continue
    if cl_ipolicy is None:
      raise Error("A group defines an instance policy but there is no"
                  " instance policy at cluster level")
    UpgradeIPolicy(ipolicy, cl_ipolicy, True)
162

    
163

    
164
def GetExclusiveStorageValue(config_data):
  """Return a conservative value of the exclusive_storage flag.

  Return C{True} if the cluster or at least a nodegroup have the flag set.

  """
  def _FlagSet(obj):
    # ndparams may be absent entirely; a missing or falsy flag counts as off
    ndparams = obj.get("ndparams")
    return ndparams is not None and bool(ndparams.get("exclusive_storage"))

  if _FlagSet(config_data["cluster"]):
    return True
  return any(_FlagSet(group)
             for group in config_data["nodegroups"].values())
180

    
181

    
182
def ChangeDiskDevType(disk, dev_type_map):
  """Replaces disk's dev_type attributes according to the given map.

  Works recursively over the disk's children, so it can be used for
  both up- and downgrading a whole disk tree.

  """
  current = disk["dev_type"]
  if current in dev_type_map:
    disk["dev_type"] = dev_type_map[current]
  for child in disk.get("children", []):
    ChangeDiskDevType(child, dev_type_map)
192

    
193

    
194
def UpgradeDiskDevType(disk):
  """Upgrades the disks' device type.

  @param disk: disk dictionary, converted in place (old LD_* style
      dev_type values are mapped to the new DT_* values)

  """
  ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)
197

    
198

    
199
def UpgradeInstances(config_data):
  """Upgrades the instances' configuration.

  Replaces NIC network names with the networks' UUIDs, normalizes each
  disk's iv_name to "disk/<index>", converts legacy disk dev_type values
  and warns if any disk is missing the 2.9 "spindles" parameter while
  exclusive storage may be in use.

  @param config_data: whole configuration dict, modified in place
  @raise Error: if the "instances" key or an instance's "disks" entry
      is missing

  """
  # NOTE(review): assumes UpgradeNetworks() already ensured the
  # "networks" key exists — confirm call order in UpgradeAll()
  network2uuid = dict((n["name"], n["uuid"])
                      for n in config_data["networks"].values())
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")

  missing_spindles = False
  for instance, iobj in config_data["instances"].items():
    # NICs referring to a network by name are rewritten to use its UUID
    for nic in iobj["nics"]:
      name = nic.get("network", None)
      if name:
        uuid = network2uuid.get(name, None)
        if uuid:
          print("NIC with network name %s found."
                " Substituting with uuid %s." % (name, uuid))
          nic["network"] = uuid

    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    disks = iobj["disks"]
    for idx, dobj in enumerate(disks):
      # iv_name must reflect the disk's position in the list
      expected = "disk/%s" % idx
      current = dobj.get("iv_name", "")
      if current != expected:
        logging.warning("Updating iv_name for instance %s/disk %s"
                        " from '%s' to '%s'",
                        instance, idx, current, expected)
        dobj["iv_name"] = expected

      if "dev_type" in dobj:
        UpgradeDiskDevType(dobj)

      if not "spindles" in dobj:
        missing_spindles = True

  if GetExclusiveStorageValue(config_data) and missing_spindles:
    # We cannot be sure that the instances that are missing spindles have
    # exclusive storage enabled (the check would be more complicated), so we
    # give a noncommittal message
    logging.warning("Some instance disks could be needing to update the"
                    " spindles parameter; you can check by running"
                    " 'gnt-cluster verify', and fix any problem with"
                    " 'gnt-cluster repair-disk-sizes'")
244

    
245

    
246
def UpgradeRapiUsers():
  """Moves the pre-2.4 RAPI users file to its new location.

  If a real (non-symlink) file exists at the old location, it is renamed
  to the new path; afterwards, if only the new file exists, a symlink is
  created at the old location for backwards compatibility. Honours
  options.dry_run.

  @raise Error: if files exist at both the old and the new location

  """
  if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
      not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
    if os.path.exists(options.RAPI_USERS_FILE):
      # Both files present: refuse to guess which one is authoritative
      raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                  " already exists at %s" %
                  (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
    logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
                       mkdir=True, mkdir_mode=0750)

  # Create a symlink for RAPI users file
  if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
           os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
      os.path.isfile(options.RAPI_USERS_FILE)):
    logging.info("Creating symlink from %s to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)
267

    
268

    
269
def UpgradeWatcher():
  """Remove the obsolete watcher state file, if it exists."""
  if not os.path.exists(options.WATCHER_STATEFILE):
    return
  logging.info("Removing watcher state file %s", options.WATCHER_STATEFILE)
  if not options.dry_run:
    utils.RemoveFile(options.WATCHER_STATEFILE)
275

    
276

    
277
def UpgradeFileStoragePaths(config_data):
  """Writes the cluster's file storage directories to the whitelist file.

  If the file-storage-paths file does not exist yet, it is generated from
  the cluster's file_storage_dir / shared_file_storage_dir settings.
  Honours options.dry_run via utils.WriteFile.

  @param config_data: whole configuration dict (read-only here)

  """
  # Write file storage paths
  if not os.path.exists(options.FILE_STORAGE_PATHS_FILE):
    cluster = config_data["cluster"]
    file_storage_dir = cluster.get("file_storage_dir")
    shared_file_storage_dir = cluster.get("shared_file_storage_dir")
    del cluster

    logging.info("Ganeti 2.7 and later only allow whitelisted directories"
                 " for file storage; writing existing configuration values"
                 " into '%s'",
                 options.FILE_STORAGE_PATHS_FILE)

    if file_storage_dir:
      logging.info("File storage directory: %s", file_storage_dir)
    if shared_file_storage_dir:
      logging.info("Shared file storage directory: %s",
                   shared_file_storage_dir)

    # Assemble the file contents before writing, one directory per line
    buf = StringIO()
    buf.write("# List automatically generated from configuration by\n")
    buf.write("# cfgupgrade at %s\n" % time.asctime())
    if file_storage_dir:
      buf.write("%s\n" % file_storage_dir)
    if shared_file_storage_dir:
      buf.write("%s\n" % shared_file_storage_dir)
    utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
                    data=buf.getvalue(),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)
308

    
309

    
310
def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
  """Translate a node key, keeping the old key if it is unknown.

  @param nodes_by_old_key: node objects indexed by their old key
  @param old_key: the key to translate
  @param new_key_field: field of the node object holding the new key

  """
  try:
    node = nodes_by_old_key[old_key]
  except KeyError:
    logging.warning("Can't find node '%s' in configuration, assuming that it's"
                    " already up-to-date", old_key)
    return old_key
  return node[new_key_field]
316

    
317

    
318
def ChangeNodeIndices(config_data, old_key_field, new_key_field):
  """Re-key the nodes dict and rewrite all node references.

  Rewrites the cluster's master_node, every instance's primary_node and
  the node references inside DRBD disks' logical_id.

  """
  # Note: 'drbd8' is a legacy device type from pre 2.9 and needs to be
  # considered when up/downgrading from/to any versions touching 2.9 on the
  # way.
  drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD

  def _FixDiskNodes(disk):
    if disk["dev_type"] in drbd_disk_types:
      logical_id = disk["logical_id"]
      # The first two logical_id entries are the node keys
      for i in (0, 1):
        logical_id[i] = GetNewNodeIndex(nodes_by_old_key, logical_id[i],
                                        new_key_field)
    for child in disk.get("children", []):
      _FixDiskNodes(child)

  nodes_by_old_key = {}
  nodes_by_new_key = {}
  for node in config_data["nodes"].values():
    nodes_by_old_key[node[old_key_field]] = node
    nodes_by_new_key[node[new_key_field]] = node

  config_data["nodes"] = nodes_by_new_key

  cluster = config_data["cluster"]
  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           cluster["master_node"],
                                           new_key_field)

  for inst in config_data["instances"].values():
    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           inst["primary_node"],
                                           new_key_field)
    for disk in inst["disks"]:
      _FixDiskNodes(disk)
352

    
353

    
354
def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
  """Re-key the instances dict from old_key_field to new_key_field.

  Unlike ChangeNodeIndices there are no cross-references to rewrite, so
  the dict is simply rebuilt keyed by the new field. (The previous
  implementation also built an old-key index that was never used; it has
  been removed.)

  @param config_data: whole configuration dict, modified in place
  @param old_key_field: instance field the dict is currently keyed by (unused
      for the lookup itself, kept for interface symmetry with
      ChangeNodeIndices)
  @param new_key_field: instance field to key the dict by

  """
  # pylint: disable=W0613  # old_key_field kept for symmetric signature
  config_data["instances"] = dict(
    (inst[new_key_field], inst)
    for inst in config_data["instances"].values())
362

    
363

    
364
def UpgradeNodeIndices(config_data):
  """Re-keys the nodes dict from names to UUIDs (2.9 format)."""
  ChangeNodeIndices(config_data, "name", "uuid")
366

    
367

    
368
def UpgradeInstanceIndices(config_data):
  """Re-keys the instances dict from names to UUIDs (2.9 format)."""
  ChangeInstanceIndices(config_data, "name", "uuid")
370

    
371

    
372
def UpgradeAll(config_data):
  """Runs every upgrade step, bringing the config to TARGET_MAJOR.TARGET_MINOR.

  The order matters: UpgradeNetworks must run before UpgradeInstances
  (which reads config_data["networks"]), and the index conversions come
  last so that all other steps still see name-keyed dicts.

  @param config_data: whole configuration dict, modified in place

  """
  config_data["version"] = constants.BuildVersion(TARGET_MAJOR,
                                                  TARGET_MINOR, 0)
  UpgradeRapiUsers()
  UpgradeWatcher()
  UpgradeFileStoragePaths(config_data)
  UpgradeNetworks(config_data)
  UpgradeCluster(config_data)
  UpgradeGroups(config_data)
  UpgradeInstances(config_data)
  UpgradeNodeIndices(config_data)
  UpgradeInstanceIndices(config_data)
384

    
385

    
386
def DowngradeDiskDevType(disk):
  """Downgrades the disks' device type.

  @param disk: disk dictionary, converted in place (new DT_* style
      dev_type values are mapped back to the old LD_* values)

  """
  ChangeDiskDevType(disk, DEV_TYPE_NEW_OLD)
389

    
390

    
391
def DowngradeDisks(disks, owner):
  """Downgrade a list of disk objects to the pre-2.9 format.

  @param disks: list of disk dictionaries, modified in place
  @param owner: name of the owning instance, used only in log messages

  """
  for disk in disks:
    # Remove spindles to downgrade to 2.8
    if "spindles" in disk:
      logging.warning("Removing spindles (value=%s) from disk %s (%s) of"
                      " instance %s",
                      disk["spindles"], disk["iv_name"], disk["uuid"], owner)
      disk.pop("spindles")
    if "dev_type" in disk:
      DowngradeDiskDevType(disk)
401

    
402

    
403
def DowngradeInstances(config_data):
  """Downgrade the disks of every instance in the configuration."""
  try:
    instances = config_data["instances"]
  except KeyError:
    raise Error("Cannot find the 'instances' key in the configuration!")
  for iname, iobj in instances.items():
    try:
      disks = iobj["disks"]
    except KeyError:
      raise Error("Cannot find 'disks' key for instance %s" % iname)
    DowngradeDisks(disks, iname)
410

    
411

    
412
def DowngradeNodeIndices(config_data):
  """Re-keys the nodes dict from UUIDs back to names (pre-2.9 format)."""
  ChangeNodeIndices(config_data, "uuid", "name")
414

    
415

    
416
def DowngradeInstanceIndices(config_data):
  """Re-keys the instances dict from UUIDs back to names (pre-2.9 format)."""
  ChangeInstanceIndices(config_data, "uuid", "name")
418

    
419

    
420
def DowngradeHvparams(config_data):
  """Downgrade the cluster's hypervisor parameters."""
  cluster = config_data["cluster"]
  if "hvparams" not in cluster:
    return
  hvparams = cluster["hvparams"]
  for xen_variant in (constants.HT_XEN_PVM, constants.HT_XEN_HVM):
    if xen_variant not in hvparams:
      continue
    xen_params = hvparams[xen_variant]
    # 'xen_cmd' was introduced in 2.9
    if constants.HV_XEN_CMD in xen_params:
      del xen_params[constants.HV_XEN_CMD]
432

    
433

    
434
def DowngradeAll(config_data):
  """Runs every downgrade step, reverting to DOWNGRADE_MAJOR.DOWNGRADE_MINOR.

  @param config_data: whole configuration dict, modified in place

  """
  # Any code specific to a particular version should be labeled that way, so
  # it can be removed when updating to the next version.
  config_data["version"] = constants.BuildVersion(DOWNGRADE_MAJOR,
                                                  DOWNGRADE_MINOR, 0)
  DowngradeInstances(config_data)
  DowngradeNodeIndices(config_data)
  DowngradeInstanceIndices(config_data)
  DowngradeHvparams(config_data)
443

    
444

    
445
def main():
  """Main program.

  Parses command-line options, sanity-checks the environment (hostname,
  data directory layout), loads config.data, runs UpgradeAll or
  DowngradeAll depending on --downgrade, writes the result back,
  regenerates missing crypto material and finally re-loads the new file
  as a verification step (unless --no-verify or --dry-run).

  """
  global options, args # pylint: disable=W0603

  # Option parsing
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  (options, args) = parser.parse_args()

  # We need to keep filenames locally because they might be renamed between
  # versions.
  options.data_dir = os.path.abspath(options.data_dir)
  options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
  options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
  options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
  options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
  options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
  options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
  options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
  options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
  options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
  options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
  options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
  options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
  options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"

  SetupLogging()

  # Option checking
  if args:
    raise Error("No arguments expected")
  # Verification is skipped on downgrade: the loaded config module only
  # understands the current version's format
  if options.downgrade and not options.no_verify:
    options.no_verify = True

  # Check master name
  if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
    logging.error("Aborting due to hostname mismatch")
    sys.exit(constants.EXIT_FAILURE)

  # Interactive confirmation unless --force was given
  if not options.force:
    if options.downgrade:
      usertext = ("The configuration is going to be DOWNGRADED to version %s.%s"
                  " Some configuration data might be removed if they don't fit"
                  " in the old format. Please make sure you have read the"
                  " upgrade notes (available in the UPGRADE file and included"
                  " in other documentation formats) to understand what they"
                  " are. Continue with *DOWNGRADING* the configuration?" %
                  (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
    else:
      usertext = ("Please make sure you have read the upgrade notes for"
                  " Ganeti %s (available in the UPGRADE file and included"
                  " in other documentation formats). Continue with upgrading"
                  " configuration?" % constants.RELEASE_VERSION)
    if not cli.AskUser(usertext):
      sys.exit(constants.EXIT_FAILURE)

  # Check whether it's a Ganeti configuration directory
  if not (os.path.isfile(options.CONFIG_DATA_PATH) and
          os.path.isfile(options.SERVER_PEM_PATH) and
          os.path.isfile(options.KNOWN_HOSTS_PATH)):
    raise Error(("%s does not seem to be a Ganeti configuration"
                 " directory") % options.data_dir)

  if not os.path.isdir(options.conf_dir):
    raise Error("Not a directory: %s" % options.conf_dir)

  config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))

  try:
    config_version = config_data["version"]
  except KeyError:
    raise Error("Unable to determine configuration version")

  (config_major, config_minor, config_revision) = \
    constants.SplitVersion(config_version)

  logging.info("Found configuration version %s (%d.%d.%d)",
               config_version, config_major, config_minor, config_revision)

  if "config_version" in config_data["cluster"]:
    raise Error("Inconsistent configuration: found config_version in"
                " configuration file")

  # Downgrade to the previous stable version
  if options.downgrade:
    # Downgrading an already-downgraded config is accepted (idempotent)
    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
            (config_major == DOWNGRADE_MAJOR and
             config_minor == DOWNGRADE_MINOR)):
      raise Error("Downgrade supported only from the latest version (%s.%s),"
                  " found %s (%s.%s.%s) instead" %
                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                   config_minor, config_revision))
    DowngradeAll(config_data)

  # Upgrade from 2.{0..7} to 2.9
  elif config_major == 2 and config_minor in range(0, 10):
    if config_revision != 0:
      logging.warning("Config revision is %s, not 0", config_revision)
    UpgradeAll(config_data)

  elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
    logging.info("No changes necessary")

  else:
    raise Error("Configuration version %d.%d.%d not supported by this tool" %
                (config_major, config_minor, config_revision))

  try:
    logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
    utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
                    data=serializer.DumpJson(config_data),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)

    if not options.dry_run:
      # Regenerate only missing crypto files (all 'new_*' flags False)
      bootstrap.GenerateClusterCrypto(
        False, False, False, False, False,
        nodecert_file=options.SERVER_PEM_PATH,
        rapicert_file=options.RAPI_CERT_FILE,
        spicecert_file=options.SPICE_CERT_FILE,
        spicecacert_file=options.SPICE_CACERT_FILE,
        hmackey_file=options.CONFD_HMAC_KEY,
        cds_file=options.CDS_FILE)

  except Exception:
    logging.critical("Writing configuration failed. It is probably in an"
                     " inconsistent state and needs manual intervention.")
    raise

  # test loading the config file
  all_ok = True
  if not (options.dry_run or options.no_verify):
    logging.info("Testing the new config file...")
    cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
                              accept_foreign=options.ignore_hostname,
                              offline=True)
    # if we reached this, it's all fine
    vrfy = cfg.VerifyConfig()
    if vrfy:
      logging.error("Errors after conversion:")
      for item in vrfy:
        logging.error(" - %s", item)
      all_ok = False
    else:
      logging.info("File loaded successfully after upgrading")
    del cfg

  if options.downgrade:
    action = "downgraded"
    out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
  else:
    action = "upgraded"
    out_ver = constants.RELEASE_VERSION
  if all_ok:
    cli.ToStderr("Configuration successfully %s to version %s.",
                 action, out_ver)
  else:
    cli.ToStderr("Configuration %s to version %s, but there are errors."
                 "\nPlease review the file.", action, out_ver)


if __name__ == "__main__":
  main()