#!/usr/bin/python
#

# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Tool to upgrade the configuration file.

This code handles only the types supported by simplejson. As an
example, 'set' is a 'list'.

"""


import os
import os.path
import sys
import optparse
import logging
import time
from cStringIO import StringIO

from ganeti import constants
from ganeti import serializer
from ganeti import utils
from ganeti import cli
from ganeti import bootstrap
from ganeti import config
from ganeti import netutils
from ganeti import pathutils

from ganeti.utils import version


options = None
args = None


#: Target major version we will upgrade to
TARGET_MAJOR = 2
#: Target minor version we will upgrade to
TARGET_MINOR = 10
#: Target major version for downgrade
DOWNGRADE_MAJOR = 2
#: Target minor version for downgrade
DOWNGRADE_MINOR = 9

# map of legacy device types
# (mapping differing old LD_* constants to new DT_* constants)
DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
# (mapping differing new DT_* constants to old LD_* constants)
DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())


class Error(Exception):
  """Generic exception"""
  pass


def SetupLogging():
  """Configures the logging module.

  """
  formatter = logging.Formatter("%(asctime)s: %(message)s")

  stderr_handler = logging.StreamHandler()
  stderr_handler.setFormatter(formatter)
  if options.debug:
    stderr_handler.setLevel(logging.NOTSET)
  elif options.verbose:
    stderr_handler.setLevel(logging.INFO)
  else:
    stderr_handler.setLevel(logging.WARNING)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)
  root_logger.addHandler(stderr_handler)


def CheckHostname(path):
  """Ensures hostname matches ssconf value.

  @param path: Path to ssconf file

  """
  ssconf_master_node = utils.ReadOneLineFile(path)
  hostname = netutils.GetHostname().name

  if ssconf_master_node == hostname:
    return True

  logging.warning("Warning: ssconf says master node is '%s', but this"
                  " machine's name is '%s'; this tool must be run on"
                  " the master node", ssconf_master_node, hostname)
  return False


def _FillIPolicySpecs(default_ipolicy, ipolicy):
  if "minmax" in ipolicy:
    for (key, spec) in ipolicy["minmax"][0].items():
      for (par, val) in default_ipolicy["minmax"][0][key].items():
        if par not in spec:
          spec[par] = val


def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
  minmax_keys = ["min", "max"]
  if any((k in ipolicy) for k in minmax_keys):
    minmax = {}
    for key in minmax_keys:
      if key in ipolicy:
        if ipolicy[key]:
          minmax[key] = ipolicy[key]
        del ipolicy[key]
    if minmax:
      ipolicy["minmax"] = [minmax]
  if isgroup and "std" in ipolicy:
    del ipolicy["std"]
  _FillIPolicySpecs(default_ipolicy, ipolicy)
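
# Illustration (hypothetical values, added for clarity): an old-style policy
# such as
#   {"min": {"memory-size": 128}, "max": {"memory-size": 4096}, "std": {...}}
# is rewritten by UpgradeIPolicy into
#   {"minmax": [{"min": {"memory-size": 128}, "max": {"memory-size": 4096}}],
#    "std": {...}}
# with "std" dropped entirely for node group policies; afterwards any
# parameters missing from the min/max specs are filled in from the default
# policy.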


def UpgradeNetworks(config_data):
  networks = config_data.get("networks", None)
  if not networks:
    config_data["networks"] = {}


def UpgradeCluster(config_data):
  cluster = config_data.get("cluster", None)
  if cluster is None:
    raise Error("Cannot find cluster")
  ipolicy = cluster.setdefault("ipolicy", None)
  if ipolicy:
    UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)


def UpgradeGroups(config_data):
  cl_ipolicy = config_data["cluster"].get("ipolicy")
  for group in config_data["nodegroups"].values():
    networks = group.get("networks", None)
    if not networks:
      group["networks"] = {}
    ipolicy = group.get("ipolicy", None)
    if ipolicy:
      if cl_ipolicy is None:
        raise Error("A group defines an instance policy but there is no"
                    " instance policy at cluster level")
      UpgradeIPolicy(ipolicy, cl_ipolicy, True)


def GetExclusiveStorageValue(config_data):
  """Return a conservative value of the exclusive_storage flag.

  Return C{True} if the cluster or at least one nodegroup has the flag set.

  """
  ret = False
  cluster = config_data["cluster"]
  ndparams = cluster.get("ndparams")
  if ndparams is not None and ndparams.get("exclusive_storage"):
    ret = True
  for group in config_data["nodegroups"].values():
    ndparams = group.get("ndparams")
    if ndparams is not None and ndparams.get("exclusive_storage"):
      ret = True
  return ret


def RemovePhysicalId(disk):
  if "children" in disk:
    for d in disk["children"]:
      RemovePhysicalId(d)
  if "physical_id" in disk:
    del disk["physical_id"]


def ChangeDiskDevType(disk, dev_type_map):
  """Replaces disk's dev_type attributes according to the given map.

  This can be used both for upgrading and for downgrading the disks.
  """
  if disk["dev_type"] in dev_type_map:
    disk["dev_type"] = dev_type_map[disk["dev_type"]]
  if "children" in disk:
    for child in disk["children"]:
      ChangeDiskDevType(child, dev_type_map)


def UpgradeDiskDevType(disk):
  """Upgrades the disks' device type."""
  ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)
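
# Illustration (hypothetical disk object, added for clarity): with the map
# above, a legacy disk such as
#   {"dev_type": "drbd8",
#    "children": [{"dev_type": "lvm"}, {"dev_type": "lvm"}]}
# gets its own "dev_type" and those of its children rewritten to the new DT_*
# constants, while device types not listed in the map are left untouched.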


def UpgradeInstances(config_data):
  """Upgrades the instances' configuration."""

  network2uuid = dict((n["name"], n["uuid"])
                      for n in config_data["networks"].values())
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")

  missing_spindles = False
  for instance, iobj in config_data["instances"].items():
    for nic in iobj["nics"]:
      name = nic.get("network", None)
      if name:
        uuid = network2uuid.get(name, None)
        if uuid:
          print("NIC with network name %s found."
                " Substituting with uuid %s." % (name, uuid))
          nic["network"] = uuid

    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    disks = iobj["disks"]
    for idx, dobj in enumerate(disks):
      RemovePhysicalId(dobj)

      expected = "disk/%s" % idx
      current = dobj.get("iv_name", "")
      if current != expected:
        logging.warning("Updating iv_name for instance %s/disk %s"
                        " from '%s' to '%s'",
                        instance, idx, current, expected)
        dobj["iv_name"] = expected

      if "dev_type" in dobj:
        UpgradeDiskDevType(dobj)

      if not "spindles" in dobj:
        missing_spindles = True

  if GetExclusiveStorageValue(config_data) and missing_spindles:
    # We cannot be sure that the instances that are missing spindles have
    # exclusive storage enabled (the check would be more complicated), so we
    # give a noncommittal message
    logging.warning("Some instance disks could be needing to update the"
                    " spindles parameter; you can check by running"
                    " 'gnt-cluster verify', and fix any problem with"
                    " 'gnt-cluster repair-disk-sizes'")


def UpgradeRapiUsers():
  if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
      not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
    if os.path.exists(options.RAPI_USERS_FILE):
      raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                  " already exists at %s" %
                  (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
    logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
                       mkdir=True, mkdir_mode=0750)

  # Create a symlink for RAPI users file
  if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
           os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
      os.path.isfile(options.RAPI_USERS_FILE)):
    logging.info("Creating symlink from %s to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)


def UpgradeWatcher():
  # Remove old watcher state file if it exists
  if os.path.exists(options.WATCHER_STATEFILE):
    logging.info("Removing watcher state file %s", options.WATCHER_STATEFILE)
    if not options.dry_run:
      utils.RemoveFile(options.WATCHER_STATEFILE)


def UpgradeFileStoragePaths(config_data):
  # Write file storage paths
  if not os.path.exists(options.FILE_STORAGE_PATHS_FILE):
    cluster = config_data["cluster"]
    file_storage_dir = cluster.get("file_storage_dir")
    shared_file_storage_dir = cluster.get("shared_file_storage_dir")
    del cluster

    logging.info("Ganeti 2.7 and later only allow whitelisted directories"
                 " for file storage; writing existing configuration values"
                 " into '%s'",
                 options.FILE_STORAGE_PATHS_FILE)

    if file_storage_dir:
      logging.info("File storage directory: %s", file_storage_dir)
    if shared_file_storage_dir:
      logging.info("Shared file storage directory: %s",
                   shared_file_storage_dir)

    buf = StringIO()
    buf.write("# List automatically generated from configuration by\n")
    buf.write("# cfgupgrade at %s\n" % time.asctime())
    if file_storage_dir:
      buf.write("%s\n" % file_storage_dir)
    if shared_file_storage_dir:
      buf.write("%s\n" % shared_file_storage_dir)
    utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
                    data=buf.getvalue(),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)


def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
  if old_key not in nodes_by_old_key:
    logging.warning("Can't find node '%s' in configuration, assuming that it's"
                    " already up-to-date", old_key)
    return old_key
  return nodes_by_old_key[old_key][new_key_field]


def ChangeNodeIndices(config_data, old_key_field, new_key_field):
  def ChangeDiskNodeIndices(disk):
    # Note: 'drbd8' is a legacy device type from pre 2.9 and needs to be
    # considered when up/downgrading from/to any versions touching 2.9 on the
    # way.
    drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD
    if disk["dev_type"] in drbd_disk_types:
      for i in range(0, 2):
        disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
                                                disk["logical_id"][i],
                                                new_key_field)
    if "children" in disk:
      for child in disk["children"]:
        ChangeDiskNodeIndices(child)

  nodes_by_old_key = {}
  nodes_by_new_key = {}
  for (_, node) in config_data["nodes"].items():
    nodes_by_old_key[node[old_key_field]] = node
    nodes_by_new_key[node[new_key_field]] = node

  config_data["nodes"] = nodes_by_new_key

  cluster = config_data["cluster"]
  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           cluster["master_node"],
                                           new_key_field)

  for inst in config_data["instances"].values():
    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           inst["primary_node"],
                                           new_key_field)
    for disk in inst["disks"]:
      ChangeDiskNodeIndices(disk)
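
# Illustration (hypothetical configuration, added for clarity): when called as
# ChangeNodeIndices(config_data, "name", "uuid") (the upgrade direction), the
# "nodes" dict is re-keyed from node names to node UUIDs, and every reference
# to a node (the cluster's "master_node", each instance's "primary_node", and
# the first two entries of a DRBD disk's "logical_id") is rewritten from the
# old name to the corresponding UUID.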


def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
  insts_by_old_key = {}
  insts_by_new_key = {}
  for (_, inst) in config_data["instances"].items():
    insts_by_old_key[inst[old_key_field]] = inst
    insts_by_new_key[inst[new_key_field]] = inst

  config_data["instances"] = insts_by_new_key


def UpgradeNodeIndices(config_data):
  ChangeNodeIndices(config_data, "name", "uuid")


def UpgradeInstanceIndices(config_data):
  ChangeInstanceIndices(config_data, "name", "uuid")


def UpgradeAll(config_data):
  config_data["version"] = version.BuildVersion(TARGET_MAJOR, TARGET_MINOR, 0)
  UpgradeRapiUsers()
  UpgradeWatcher()
  UpgradeFileStoragePaths(config_data)
  UpgradeNetworks(config_data)
  UpgradeCluster(config_data)
  UpgradeGroups(config_data)
  UpgradeInstances(config_data)
  UpgradeNodeIndices(config_data)
  UpgradeInstanceIndices(config_data)


def DowngradeNDParams(ndparams):
  for param in ["ovs", "ovs_link", "ovs_name"]:
    if param in ndparams:
      del ndparams[param]


def DowngradeNicParams(nicparams):
  if "vlan" in nicparams:
    del nicparams["vlan"]


def DowngradeHVParams(hvparams):
  for hv in ["xen-pvm", "xen-hvm"]:
    if hv not in hvparams:
      continue
    for param in ["cpuid", "soundhw"]:
      if param in hvparams[hv]:
        del hvparams[hv][param]


def DowngradeCluster(config_data):
  cluster = config_data["cluster"]
  DowngradeNDParams(cluster["ndparams"])
  DowngradeNicParams(cluster["nicparams"][constants.PP_DEFAULT])
  DowngradeHVParams(cluster["hvparams"])


def DowngradeNodeGroups(config_data):
  for (_, ngobj) in config_data["nodegroups"].items():
    DowngradeNDParams(ngobj["ndparams"])


def DowngradeNodes(config_data):
  for (_, nobj) in config_data["nodes"].items():
    DowngradeNDParams(nobj["ndparams"])


def DowngradeInstances(config_data):
  for (_, iobj) in config_data["instances"].items():
    DowngradeHVParams(iobj["hvparams"])
    for nic in iobj["nics"]:
      DowngradeNicParams(nic["nicparams"])


def DowngradeAll(config_data):
  # Any code specific to a particular version should be labeled that way, so
  # it can be removed when updating to the next version.
  config_data["version"] = version.BuildVersion(DOWNGRADE_MAJOR,
                                                DOWNGRADE_MINOR, 0)
  DowngradeCluster(config_data)
  DowngradeNodeGroups(config_data)
  DowngradeNodes(config_data)
  DowngradeInstances(config_data)
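
# Summary of the downgrade path implemented above: reverting to 2.9 removes
# the parameters introduced in 2.10, namely the "ovs", "ovs_link" and
# "ovs_name" node parameters, the "vlan" NIC parameter and the "cpuid" and
# "soundhw" hypervisor parameters for Xen; everything already understood by
# the 2.9 schema is left unchanged.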


def main():
  """Main program.

  """
  global options, args # pylint: disable=W0603

  # Option parsing
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  (options, args) = parser.parse_args()

  # We need to keep filenames locally because they might be renamed between
  # versions.
  options.data_dir = os.path.abspath(options.data_dir)
  options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
  options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
  options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
  options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
  options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
  options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
  options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
  options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
  options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
  options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
  options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
  options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
  options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"

  SetupLogging()

  # Option checking
  if args:
    raise Error("No arguments expected")
  if options.downgrade and not options.no_verify:
    options.no_verify = True

  # Check master name
  if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
    logging.error("Aborting due to hostname mismatch")
    sys.exit(constants.EXIT_FAILURE)

  if not options.force:
    if options.downgrade:
      usertext = ("The configuration is going to be DOWNGRADED to version %s.%s"
                  " Some configuration data might be removed if they don't fit"
                  " in the old format. Please make sure you have read the"
                  " upgrade notes (available in the UPGRADE file and included"
                  " in other documentation formats) to understand what they"
                  " are. Continue with *DOWNGRADING* the configuration?" %
                  (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
    else:
      usertext = ("Please make sure you have read the upgrade notes for"
                  " Ganeti %s (available in the UPGRADE file and included"
                  " in other documentation formats). Continue with upgrading"
                  " configuration?" % constants.RELEASE_VERSION)
    if not cli.AskUser(usertext):
      sys.exit(constants.EXIT_FAILURE)

  # Check whether it's a Ganeti configuration directory
  if not (os.path.isfile(options.CONFIG_DATA_PATH) and
          os.path.isfile(options.SERVER_PEM_PATH) and
          os.path.isfile(options.KNOWN_HOSTS_PATH)):
    raise Error(("%s does not seem to be a Ganeti configuration"
                 " directory") % options.data_dir)

  if not os.path.isdir(options.conf_dir):
    raise Error("Not a directory: %s" % options.conf_dir)

  config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))

  try:
    config_version = config_data["version"]
  except KeyError:
    raise Error("Unable to determine configuration version")

  (config_major, config_minor, config_revision) = \
    version.SplitVersion(config_version)

  logging.info("Found configuration version %s (%d.%d.%d)",
               config_version, config_major, config_minor, config_revision)

  if "config_version" in config_data["cluster"]:
    raise Error("Inconsistent configuration: found config_version in"
                " configuration file")

  # Downgrade to the previous stable version
  if options.downgrade:
    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
            (config_major == DOWNGRADE_MAJOR and
             config_minor == DOWNGRADE_MINOR)):
      raise Error("Downgrade supported only from the latest version (%s.%s),"
                  " found %s (%s.%s.%s) instead" %
                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                   config_minor, config_revision))
    DowngradeAll(config_data)

  # Upgrade from 2.{0..9} to 2.10
  elif config_major == 2 and config_minor in range(0, 10):
    if config_revision != 0:
      logging.warning("Config revision is %s, not 0", config_revision)
    UpgradeAll(config_data)

  elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
    logging.info("No changes necessary")

  else:
    raise Error("Configuration version %d.%d.%d not supported by this tool" %
                (config_major, config_minor, config_revision))

  try:
    logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
    utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
                    data=serializer.DumpJson(config_data),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)

    if not options.dry_run:
      bootstrap.GenerateClusterCrypto(
        False, False, False, False, False,
        nodecert_file=options.SERVER_PEM_PATH,
        rapicert_file=options.RAPI_CERT_FILE,
        spicecert_file=options.SPICE_CERT_FILE,
        spicecacert_file=options.SPICE_CACERT_FILE,
        hmackey_file=options.CONFD_HMAC_KEY,
        cds_file=options.CDS_FILE)

  except Exception:
    logging.critical("Writing configuration failed. It is probably in an"
                     " inconsistent state and needs manual intervention.")
    raise

  # test loading the config file
  all_ok = True
  if not (options.dry_run or options.no_verify):
    logging.info("Testing the new config file...")
    cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
                              accept_foreign=options.ignore_hostname,
                              offline=True)
    # if we reached this, it's all fine
    vrfy = cfg.VerifyConfig()
    if vrfy:
      logging.error("Errors after conversion:")
      for item in vrfy:
        logging.error(" - %s", item)
      all_ok = False
    else:
      logging.info("File loaded successfully after upgrading")
    del cfg

  if options.downgrade:
    action = "downgraded"
    out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
  else:
    action = "upgraded"
    out_ver = constants.RELEASE_VERSION
  if all_ok:
    cli.ToStderr("Configuration successfully %s to version %s.",
                 action, out_ver)
  else:
    cli.ToStderr("Configuration %s to version %s, but there are errors."
                 "\nPlease review the file.", action, out_ver)


if __name__ == "__main__":
  main()