Statistics
| Branch: | Tag: | Revision:

root / tools / cfgupgrade @ c7a02959

History | View | Annotate | Download (20.5 kB)

1
#!/usr/bin/python
2
#
3

    
4
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Tool to upgrade the configuration file.
23

    
24
This code handles only the types supported by simplejson. As an
25
example, 'set' is a 'list'.
26

    
27
"""
28

    
29

    
30
import os
31
import os.path
32
import sys
33
import optparse
34
import logging
35
import time
36
from cStringIO import StringIO
37

    
38
from ganeti import constants
39
from ganeti import serializer
40
from ganeti import utils
41
from ganeti import cli
42
from ganeti import bootstrap
43
from ganeti import config
44
from ganeti import netutils
45
from ganeti import pathutils
46

    
47
from ganeti.utils import version
48

    
49

    
50
options = None
51
args = None
52

    
53

    
54
#: Target major version we will upgrade to
55
TARGET_MAJOR = 2
56
#: Target minor version we will upgrade to
57
TARGET_MINOR = 11
58
#: Target major version for downgrade
59
DOWNGRADE_MAJOR = 2
60
#: Target minor version for downgrade
61
DOWNGRADE_MINOR = 10
62

    
63
# map of legacy device types
64
# (mapping differing old LD_* constants to new DT_* constants)
65
DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
66
# (mapping differing new DT_* constants to old LD_* constants)
67
DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())
68

    
69

    
70
class Error(Exception):
  """Generic exception raised on configuration up/downgrade failures."""
73

    
74

    
75
def SetupLogging():
  """Configures the logging module.

  Attaches a stderr handler to the root logger; the handler's threshold
  depends on the global command-line options: everything with C{--debug},
  informational messages with C{--verbose}, warnings otherwise.

  """
  if options.debug:
    level = logging.NOTSET
  elif options.verbose:
    level = logging.INFO
  else:
    level = logging.WARNING

  handler = logging.StreamHandler()
  handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
  handler.setLevel(level)

  root = logging.getLogger("")
  root.setLevel(logging.NOTSET)
  root.addHandler(handler)
93

    
94

    
95
def CheckHostname(path):
  """Ensures hostname matches ssconf value.

  @param path: Path to ssconf file
  @return: C{True} if the local hostname equals the recorded master node
    name, C{False} otherwise (after logging a warning)

  """
  master_name = utils.ReadOneLineFile(path)
  local_name = netutils.GetHostname().name

  if master_name != local_name:
    logging.warning("Warning: ssconf says master node is '%s', but this"
                    " machine's name is '%s'; this tool must be run on"
                    " the master node", master_name, local_name)
    return False

  return True
111

    
112

    
113
def _FillIPolicySpecs(default_ipolicy, ipolicy):
  """Fills missing min/max spec parameters from the default policy.

  For each key ("min"/"max") of the first minmax entry, every parameter
  present in C{default_ipolicy} but absent from C{ipolicy} is copied
  over; values already present are left untouched.

  """
  if "minmax" not in ipolicy:
    return
  defaults = default_ipolicy["minmax"][0]
  for key, spec in ipolicy["minmax"][0].items():
    for par, val in defaults[key].items():
      spec.setdefault(par, val)
119

    
120

    
121
def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
  """Converts a pre-2.11 instance policy to the minmax layout.

  The legacy top-level "min"/"max" entries are folded into a single
  one-element "minmax" list; for node groups the "std" entry is removed
  (it is only meaningful at cluster level).  Missing spec parameters are
  then filled in from C{default_ipolicy}.

  """
  minmax = {}
  for bound in ("min", "max"):
    if bound in ipolicy:
      value = ipolicy.pop(bound)
      if value:
        minmax[bound] = value
  if minmax:
    ipolicy["minmax"] = [minmax]

  if isgroup and "std" in ipolicy:
    del ipolicy["std"]

  _FillIPolicySpecs(default_ipolicy, ipolicy)
135

    
136

    
137
def UpgradeNetworks(config_data):
  """Ensures the configuration has a (possibly empty) networks dict."""
  if not config_data.get("networks"):
    config_data["networks"] = {}
141

    
142

    
143
def UpgradeCluster(config_data):
  """Upgrades the cluster-level instance policy, if any.

  @raise Error: if the configuration contains no cluster entry

  """
  cluster = config_data.get("cluster")
  if cluster is None:
    raise Error("Cannot find cluster")
  # Make sure the key exists even when there is nothing to upgrade
  cluster.setdefault("ipolicy", None)
  if cluster["ipolicy"]:
    UpgradeIPolicy(cluster["ipolicy"], constants.IPOLICY_DEFAULTS, False)
150

    
151

    
152
def UpgradeGroups(config_data):
  """Upgrades all node group entries.

  Ensures every group carries a networks mapping and upgrades any group
  instance policy against the cluster-level policy.

  @raise Error: if a group defines an instance policy while the cluster
    does not

  """
  cl_ipolicy = config_data["cluster"].get("ipolicy")
  for group in config_data["nodegroups"].values():
    if not group.get("networks"):
      group["networks"] = {}
    ipolicy = group.get("ipolicy")
    if not ipolicy:
      continue
    if cl_ipolicy is None:
      raise Error("A group defines an instance policy but there is no"
                  " instance policy at cluster level")
    UpgradeIPolicy(ipolicy, cl_ipolicy, True)
164

    
165

    
166
def GetExclusiveStorageValue(config_data):
  """Return a conservative value of the exclusive_storage flag.

  Return C{True} if the cluster or at least a nodegroup have the flag set.

  """
  def _FlagSet(obj):
    # A missing ndparams dict counts as the flag being unset
    params = obj.get("ndparams")
    return params is not None and bool(params.get("exclusive_storage"))

  groups = config_data["nodegroups"].values()
  return _FlagSet(config_data["cluster"]) or any(_FlagSet(g) for g in groups)
182

    
183

    
184
def RemovePhysicalId(disk):
  """Recursively strips the obsolete physical_id field from a disk tree."""
  for child in disk.get("children", []):
    RemovePhysicalId(child)
  disk.pop("physical_id", None)
190

    
191

    
192
def ChangeDiskDevType(disk, dev_type_map):
  """Replaces disk's dev_type attributes according to the given map.

  This can be used for both, up or downgrading the disks.
  """
  new_type = dev_type_map.get(disk["dev_type"])
  if new_type is not None:
    disk["dev_type"] = new_type
  for child in disk.get("children", []):
    ChangeDiskDevType(child, dev_type_map)
202

    
203

    
204
def UpgradeDiskDevType(disk):
  """Upgrades the disks' device type.

  Recursively maps legacy LD_* device types (e.g. "lvm", "drbd8") to
  their DT_* replacements.

  """
  ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)
207

    
208

    
209
def UpgradeInstances(config_data):
  """Upgrades the instances' configuration.

  Replaces NIC network names by their UUIDs, removes the obsolete
  physical_id field from all disks, fixes up iv_name values, upgrades
  legacy disk device types and warns if the spindles parameter might
  need updating.

  @param config_data: full configuration dict, modified in place
  @raise Error: if the configuration lacks an instances entry, or an
    instance has no disks entry

  """
  network2uuid = dict((n["name"], n["uuid"])
                      for n in config_data["networks"].values())
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")

  missing_spindles = False
  for instance, iobj in config_data["instances"].items():
    # NICs used to reference networks by name; switch to UUIDs
    for nic in iobj["nics"]:
      name = nic.get("network", None)
      if name:
        uuid = network2uuid.get(name, None)
        if uuid:
          print("NIC with network name %s found."
                " Substituting with uuid %s." % (name, uuid))
          nic["network"] = uuid

    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    disks = iobj["disks"]
    for idx, dobj in enumerate(disks):
      RemovePhysicalId(dobj)

      # iv_name must reflect the disk's position within the instance
      expected = "disk/%s" % idx
      current = dobj.get("iv_name", "")
      if current != expected:
        logging.warning("Updating iv_name for instance %s/disk %s"
                        " from '%s' to '%s'",
                        instance, idx, current, expected)
        dobj["iv_name"] = expected

      if "dev_type" in dobj:
        UpgradeDiskDevType(dobj)

      if "spindles" not in dobj:
        missing_spindles = True

  if GetExclusiveStorageValue(config_data) and missing_spindles:
    # We cannot be sure that the instances that are missing spindles have
    # exclusive storage enabled (the check would be more complicated), so we
    # give a noncommittal message
    logging.warning("Some instance disks could be needing to update the"
                    " spindles parameter; you can check by running"
                    " 'gnt-cluster verify', and fix any problem with"
                    " 'gnt-cluster repair-disk-sizes'")
256

    
257

    
258
def UpgradeRapiUsers():
  """Moves the pre-2.4 RAPI users file and creates a compat symlink.

  A real (non-symlink) file at the old location is renamed to the new
  location; afterwards, if only the new file exists, a symlink is
  created at the old location pointing to it.

  @raise Error: if both the old and the new file exist

  """
  old_path = options.RAPI_USERS_FILE_PRE24
  new_path = options.RAPI_USERS_FILE

  if os.path.isfile(old_path) and not os.path.islink(old_path):
    if os.path.exists(new_path):
      raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                  " already exists at %s" % (old_path, new_path))
    logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                 old_path, new_path)
    if not options.dry_run:
      utils.RenameFile(old_path, new_path, mkdir=True, mkdir_mode=0o750)

  # Create a symlink for RAPI users file
  old_present = os.path.islink(old_path) or os.path.isfile(old_path)
  if not old_present and os.path.isfile(new_path):
    logging.info("Creating symlink from %s to %s", old_path, new_path)
    if not options.dry_run:
      os.symlink(new_path, old_path)
279

    
280

    
281
def UpgradeWatcher():
  """Removes the watcher state file left behind by older versions."""
  statefile = options.WATCHER_STATEFILE
  if not os.path.exists(statefile):
    return
  logging.info("Removing watcher state file %s", statefile)
  if not options.dry_run:
    utils.RemoveFile(statefile)
287

    
288

    
289
def UpgradeFileStoragePaths(config_data):
  """Writes the file-storage whitelist required by Ganeti 2.7+.

  If the whitelist file does not exist yet, it is generated from the
  file/shared-file storage directories recorded in the cluster
  configuration.

  """
  if os.path.exists(options.FILE_STORAGE_PATHS_FILE):
    return

  cluster = config_data["cluster"]
  file_storage_dir = cluster.get("file_storage_dir")
  shared_file_storage_dir = cluster.get("shared_file_storage_dir")

  logging.info("Ganeti 2.7 and later only allow whitelisted directories"
               " for file storage; writing existing configuration values"
               " into '%s'",
               options.FILE_STORAGE_PATHS_FILE)

  if file_storage_dir:
    logging.info("File storage directory: %s", file_storage_dir)
  if shared_file_storage_dir:
    logging.info("Shared file storage directory: %s",
                 shared_file_storage_dir)

  lines = ["# List automatically generated from configuration by\n",
           "# cfgupgrade at %s\n" % time.asctime()]
  if file_storage_dir:
    lines.append("%s\n" % file_storage_dir)
  if shared_file_storage_dir:
    lines.append("%s\n" % shared_file_storage_dir)

  utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
                  data="".join(lines),
                  mode=0o600,
                  dry_run=options.dry_run,
                  backup=True)
320

    
321

    
322
def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
  """Translates a node key to the value of its new index field.

  Keys missing from the mapping are assumed to be converted already and
  are returned unchanged (after logging a warning).

  """
  try:
    node = nodes_by_old_key[old_key]
  except KeyError:
    logging.warning("Can't find node '%s' in configuration, assuming that it's"
                    " already up-to-date", old_key)
    return old_key
  return node[new_key_field]
328

    
329

    
330
def ChangeNodeIndices(config_data, old_key_field, new_key_field):
  """Rewrites all node references from one key field to another.

  Re-keys the nodes dictionary and updates every node reference: the
  cluster master node, instance primary nodes and the node slots inside
  DRBD disk logical IDs (recursively through disk children).

  """
  def _UpdateDiskNodes(disk):
    # Note: 'drbd8' is a legacy device type from pre 2.9 and needs to be
    # considered when up/downgrading from/to any versions touching 2.9 on the
    # way.
    drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD
    if disk["dev_type"] in drbd_disk_types:
      logical_id = disk["logical_id"]
      # The first two logical_id slots of a DRBD disk are node keys
      for i in (0, 1):
        logical_id[i] = GetNewNodeIndex(nodes_by_old_key, logical_id[i],
                                        new_key_field)
    for child in disk.get("children", []):
      _UpdateDiskNodes(child)

  nodes_by_old_key = {}
  nodes_by_new_key = {}
  for node in config_data["nodes"].values():
    nodes_by_old_key[node[old_key_field]] = node
    nodes_by_new_key[node[new_key_field]] = node

  config_data["nodes"] = nodes_by_new_key

  cluster = config_data["cluster"]
  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           cluster["master_node"],
                                           new_key_field)

  for inst in config_data["instances"].values():
    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           inst["primary_node"],
                                           new_key_field)
    for disk in inst["disks"]:
      _UpdateDiskNodes(disk)
364

    
365

    
366
def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
  """Re-keys the instances dictionary by another field.

  The original implementation also built a dict keyed by the old field
  that was never used; it has been dropped.

  @param config_data: configuration dict, modified in place
  @param old_key_field: field instances are currently keyed by; unused,
    kept for interface symmetry with L{ChangeNodeIndices}
  @param new_key_field: field to key the instances by

  """
  # pylint: disable=W0613
  config_data["instances"] = dict(
    (inst[new_key_field], inst)
    for inst in config_data["instances"].values())
374

    
375

    
376
def UpgradeNodeIndices(config_data):
  """Re-keys the nodes dict (and node references) from name to UUID."""
  ChangeNodeIndices(config_data, "name", "uuid")
378

    
379

    
380
def UpgradeInstanceIndices(config_data):
  """Re-keys the instances dict from name to UUID."""
  ChangeInstanceIndices(config_data, "name", "uuid")
382

    
383

    
384
def UpgradeAll(config_data):
  """Runs every upgrade step, bringing the configuration to 2.11.

  Sets the version field first, then applies the filesystem-level
  upgrades and finally each configuration transformation in order.

  """
  config_data["version"] = version.BuildVersion(TARGET_MAJOR, TARGET_MINOR, 0)
  UpgradeRapiUsers()
  UpgradeWatcher()
  for step in (UpgradeFileStoragePaths,
               UpgradeNetworks,
               UpgradeCluster,
               UpgradeGroups,
               UpgradeInstances,
               UpgradeNodeIndices,
               UpgradeInstanceIndices):
    step(config_data)
395

    
396

    
397
def DowngradeAll(config_data):
  """Performs the downgrade to the previous stable version.

  Currently only the version number is adapted.

  """
  # Any code specific to a particular version should be labeled that way, so
  # it can be removed when updating to the next version.
  config_data["version"] = version.BuildVersion(DOWNGRADE_MAJOR,
                                                DOWNGRADE_MINOR, 0)
402

    
403

    
404
def main():
  """Main program.

  Parses the command line, loads the configuration, up- or downgrades it,
  writes it back (regenerating missing cluster crypto files) and finally
  re-loads it for verification unless disabled.

  """
  global options, args # pylint: disable=W0603

  # Option parsing
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  (options, args) = parser.parse_args()

  # We need to keep filenames locally because they might be renamed between
  # versions.
  options.data_dir = os.path.abspath(options.data_dir)
  options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
  options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
  options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
  options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
  options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
  options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
  options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
  options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
  options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
  options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
  options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
  options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
  options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"

  SetupLogging()

  # Option checking
  if args:
    raise Error("No arguments expected")
  # Downgrading implies --no-verify (the resulting configuration is in the
  # older format, which this version's verifier would reject)
  if options.downgrade and not options.no_verify:
    options.no_verify = True

  # Check master name
  if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
    logging.error("Aborting due to hostname mismatch")
    sys.exit(constants.EXIT_FAILURE)

  # Ask for confirmation unless --force was given
  if not options.force:
    if options.downgrade:
      usertext = ("The configuration is going to be DOWNGRADED to version %s.%s"
                  " Some configuration data might be removed if they don't fit"
                  " in the old format. Please make sure you have read the"
                  " upgrade notes (available in the UPGRADE file and included"
                  " in other documentation formats) to understand what they"
                  " are. Continue with *DOWNGRADING* the configuration?" %
                  (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
    else:
      usertext = ("Please make sure you have read the upgrade notes for"
                  " Ganeti %s (available in the UPGRADE file and included"
                  " in other documentation formats). Continue with upgrading"
                  " configuration?" % constants.RELEASE_VERSION)
    if not cli.AskUser(usertext):
      sys.exit(constants.EXIT_FAILURE)

  # Check whether it's a Ganeti configuration directory
  if not (os.path.isfile(options.CONFIG_DATA_PATH) and
          os.path.isfile(options.SERVER_PEM_PATH) and
          os.path.isfile(options.KNOWN_HOSTS_PATH)):
    raise Error(("%s does not seem to be a Ganeti configuration"
                 " directory") % options.data_dir)

  if not os.path.isdir(options.conf_dir):
    raise Error("Not a directory: %s" % options.conf_dir)

  config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))

  try:
    config_version = config_data["version"]
  except KeyError:
    raise Error("Unable to determine configuration version")

  (config_major, config_minor, config_revision) = \
    version.SplitVersion(config_version)

  logging.info("Found configuration version %s (%d.%d.%d)",
               config_version, config_major, config_minor, config_revision)

  if "config_version" in config_data["cluster"]:
    raise Error("Inconsistent configuration: found config_version in"
                " configuration file")

  # Downgrade to the previous stable version
  if options.downgrade:
    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
            (config_major == DOWNGRADE_MAJOR and
             config_minor == DOWNGRADE_MINOR)):
      raise Error("Downgrade supported only from the latest version (%s.%s),"
                  " found %s (%s.%s.%s) instead" %
                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                   config_minor, config_revision))
    DowngradeAll(config_data)

  # Upgrade from 2.{0..10} to 2.11
  elif config_major == 2 and config_minor in range(0, 11):
    if config_revision != 0:
      logging.warning("Config revision is %s, not 0", config_revision)
    UpgradeAll(config_data)

  elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
    logging.info("No changes necessary")

  else:
    raise Error("Configuration version %d.%d.%d not supported by this tool" %
                (config_major, config_minor, config_revision))

  try:
    logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
    utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
                    data=serializer.DumpJson(config_data),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)

    # NOTE(review): the five positional False arguments presumably disable
    # forced regeneration of the individual certificates/keys — confirm
    # against bootstrap.GenerateClusterCrypto's signature
    if not options.dry_run:
      bootstrap.GenerateClusterCrypto(
        False, False, False, False, False,
        nodecert_file=options.SERVER_PEM_PATH,
        rapicert_file=options.RAPI_CERT_FILE,
        spicecert_file=options.SPICE_CERT_FILE,
        spicecacert_file=options.SPICE_CACERT_FILE,
        hmackey_file=options.CONFD_HMAC_KEY,
        cds_file=options.CDS_FILE)

  except Exception:
    logging.critical("Writing configuration failed. It is probably in an"
                     " inconsistent state and needs manual intervention.")
    raise

  # test loading the config file
  all_ok = True
  if not (options.dry_run or options.no_verify):
    logging.info("Testing the new config file...")
    cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
                              accept_foreign=options.ignore_hostname,
                              offline=True)
    # if we reached this, it's all fine
    vrfy = cfg.VerifyConfig()
    if vrfy:
      logging.error("Errors after conversion:")
      for item in vrfy:
        logging.error(" - %s", item)
      all_ok = False
    else:
      logging.info("File loaded successfully after upgrading")
    del cfg

  # Report the outcome on stderr
  if options.downgrade:
    action = "downgraded"
    out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
  else:
    action = "upgraded"
    out_ver = constants.RELEASE_VERSION
  if all_ok:
    cli.ToStderr("Configuration successfully %s to version %s.",
                 action, out_ver)
  else:
    cli.ToStderr("Configuration %s to version %s, but there are errors."
                 "\nPlease review the file.", action, out_ver)
588

    
589

    
590
# Script entry point
if __name__ == "__main__":
  main()