Statistics
| Branch: | Tag: | Revision:

root / tools / cfgupgrade @ 848cdc34

History | View | Annotate | Download (20.6 kB)

1
#!/usr/bin/python
2
#
3

    
4
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Tool to upgrade the configuration file.
23

    
24
This code handles only the types supported by simplejson. As an
25
example, 'set' is a 'list'.
26

    
27
"""
28

    
29

    
30
import os
31
import os.path
32
import sys
33
import optparse
34
import logging
35
import time
36
from cStringIO import StringIO
37

    
38
from ganeti import constants
39
from ganeti import serializer
40
from ganeti import utils
41
from ganeti import cli
42
from ganeti import bootstrap
43
from ganeti import config
44
from ganeti import netutils
45
from ganeti import pathutils
46

    
47

    
48
options = None
49
args = None
50

    
51

    
52
#: Target major version we will upgrade to
53
TARGET_MAJOR = 2
54
#: Target minor version we will upgrade to
55
TARGET_MINOR = 9
56
#: Target major version for downgrade
57
DOWNGRADE_MAJOR = 2
58
#: Target minor version for downgrade
59
DOWNGRADE_MINOR = 8
60

    
61

    
62
class Error(Exception):
  """Generic exception raised by this tool."""
  # NOTE: the redundant "pass" after the docstring was dropped; the
  # docstring alone is a sufficient class body.
65

    
66

    
67
def SetupLogging():
  """Configures the logging module.

  The stderr log level is derived from the module-level C{options}:
  everything with C{--debug}, INFO and up with C{--verbose}, otherwise
  warnings and errors only.

  """
  handler = logging.StreamHandler()
  handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))

  if options.debug:
    level = logging.NOTSET
  elif options.verbose:
    level = logging.INFO
  else:
    level = logging.WARNING
  handler.setLevel(level)

  # The root logger passes everything through; filtering happens per-handler.
  root = logging.getLogger("")
  root.setLevel(logging.NOTSET)
  root.addHandler(handler)
85

    
86

    
87
def CheckHostname(path):
  """Ensures hostname matches ssconf value.

  @param path: Path to ssconf file
  @return: C{True} when this machine is the recorded master node

  """
  ssconf_master_node = utils.ReadOneLineFile(path)
  hostname = netutils.GetHostname().name

  if ssconf_master_node != hostname:
    logging.warning("Warning: ssconf says master node is '%s', but this"
                    " machine's name is '%s'; this tool must be run on"
                    " the master node", ssconf_master_node, hostname)
    return False

  return True
103

    
104

    
105
def _FillIPolicySpecs(default_ipolicy, ipolicy):
106
  if "minmax" in ipolicy:
107
    for (key, spec) in ipolicy["minmax"][0].items():
108
      for (par, val) in default_ipolicy["minmax"][0][key].items():
109
        if par not in spec:
110
          spec[par] = val
111

    
112

    
113
def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
  """Convert an instance policy to the single-entry "minmax" layout.

  Legacy top-level "min"/"max" keys are moved into a one-element
  "minmax" list, "std" is dropped for group policies, and missing spec
  parameters are filled in from C{default_ipolicy}.

  """
  minmax = {}
  for key in ("min", "max"):
    if key in ipolicy:
      # Empty values are dropped entirely, not carried into "minmax".
      val = ipolicy.pop(key)
      if val:
        minmax[key] = val
  if minmax:
    ipolicy["minmax"] = [minmax]

  if isgroup and "std" in ipolicy:
    del ipolicy["std"]

  _FillIPolicySpecs(default_ipolicy, ipolicy)
127

    
128

    
129
def UpgradeNetworks(config_data):
  """Ensure the configuration has a (possibly empty) "networks" dict."""
  # A missing or falsy value (None, {}, ...) is replaced by an empty dict.
  if not config_data.get("networks"):
    config_data["networks"] = {}
133

    
134

    
135
def UpgradeCluster(config_data):
  """Upgrade cluster-wide data, currently only the instance policy.

  @raise Error: when the configuration has no "cluster" entry

  """
  cluster = config_data.get("cluster", None)
  if cluster is None:
    raise Error("Cannot find cluster")
  # setdefault guarantees the key exists afterwards, even when empty.
  ipolicy = cluster.setdefault("ipolicy", None)
  if ipolicy:
    UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
142

    
143

    
144
def UpgradeGroups(config_data):
  """Upgrade every node group: networks dict and instance policy.

  @raise Error: when a group has a policy but the cluster has none to
      use as the source of defaults

  """
  cl_ipolicy = config_data["cluster"].get("ipolicy")
  for group in config_data["nodegroups"].values():
    if not group.get("networks"):
      group["networks"] = {}
    ipolicy = group.get("ipolicy", None)
    if not ipolicy:
      continue
    if cl_ipolicy is None:
      raise Error("A group defines an instance policy but there is no"
                  " instance policy at cluster level")
    UpgradeIPolicy(ipolicy, cl_ipolicy, True)
156

    
157

    
158
def GetExclusiveStorageValue(config_data):
  """Return a conservative value of the exclusive_storage flag.

  Return C{True} if the cluster or at least a nodegroup have the flag set.

  """
  def _FlagSet(ndparams):
    # A missing ndparams dict counts as "flag not set".
    return ndparams is not None and bool(ndparams.get("exclusive_storage"))

  if _FlagSet(config_data["cluster"].get("ndparams")):
    return True
  return any(_FlagSet(group.get("ndparams"))
             for group in config_data["nodegroups"].values())
174

    
175

    
176
def _UpgradeInstanceNics(iobj, network2uuid):
  """Replace NIC network names with UUIDs and drop deprecated fields."""
  for nic in iobj["nics"]:
    name = nic.get("network", None)
    if name:
      uuid = network2uuid.get(name, None)
      if uuid:
        print("NIC with network name %s found."
              " Substituting with uuid %s." % (name, uuid))
        nic["network"] = uuid
    # "idx" is deprecated; membership test instead of try/del/except.
    if "idx" in nic:
      del nic["idx"]
      print("Deleting deprecated idx")


def _UpgradeInstanceDisks(instance, iobj):
  """Fix iv_name values and drop deprecated disk fields.

  @param instance: instance name/key, used for log messages
  @return: C{True} when at least one disk lacks a "spindles" attribute

  """
  missing_spindles = False
  for idx, dobj in enumerate(iobj["disks"]):
    expected = "disk/%s" % idx
    current = dobj.get("iv_name", "")
    if current != expected:
      logging.warning("Updating iv_name for instance %s/disk %s"
                      " from '%s' to '%s'",
                      instance, idx, current, expected)
      dobj["iv_name"] = expected
    if "spindles" not in dobj:
      missing_spindles = True
    if "idx" in dobj:
      del dobj["idx"]
      print("Deleting deprecated idx")
  return missing_spindles


def UpgradeInstances(config_data):
  """Upgrade all instances: NICs, disks and deprecated attributes.

  @raise Error: when the configuration has no "instances" key or an
      instance has no "disks" entry

  """
  network2uuid = dict((n["name"], n["uuid"])
                      for n in config_data["networks"].values())
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")

  missing_spindles = False
  for instance, iobj in config_data["instances"].items():
    _UpgradeInstanceNics(iobj, network2uuid)

    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    if _UpgradeInstanceDisks(instance, iobj):
      missing_spindles = True

    for attr in ("dev_idxs", "hotplug_info", "hotplugs", "pci_reservations"):
      if attr in iobj:
        del iobj[attr]
        print("Deleting deprecated %s" % attr)

  if GetExclusiveStorageValue(config_data) and missing_spindles:
    # We cannot be sure that the instances that are missing spindles have
    # exclusive storage enabled (the check would be more complicated), so we
    # give a noncommittal message
    logging.warning("Some instance disks could be needing to update the"
                    " spindles parameter; you can check by running"
                    " 'gnt-cluster verify', and fix any problem with"
                    " 'gnt-cluster repair-disk-sizes'")
232

    
233

    
234
def UpgradeRapiUsers():
235
  if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
236
      not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
237
    if os.path.exists(options.RAPI_USERS_FILE):
238
      raise Error("Found pre-2.4 RAPI users file at %s, but another file"
239
                  " already exists at %s" %
240
                  (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
241
    logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
242
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
243
    if not options.dry_run:
244
      utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
245
                       mkdir=True, mkdir_mode=0750)
246

    
247
  # Create a symlink for RAPI users file
248
  if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
249
           os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
250
      os.path.isfile(options.RAPI_USERS_FILE)):
251
    logging.info("Creating symlink from %s to %s",
252
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
253
    if not options.dry_run:
254
      os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)
255

    
256

    
257
def UpgradeWatcher():
  """Remove the watcher state file left behind by an older version."""
  if not os.path.exists(options.WATCHER_STATEFILE):
    return
  logging.info("Removing watcher state file %s", options.WATCHER_STATEFILE)
  if not options.dry_run:
    utils.RemoveFile(options.WATCHER_STATEFILE)
263

    
264

    
265
def UpgradeFileStoragePaths(config_data):
266
  # Write file storage paths
267
  if not os.path.exists(options.FILE_STORAGE_PATHS_FILE):
268
    cluster = config_data["cluster"]
269
    file_storage_dir = cluster.get("file_storage_dir")
270
    shared_file_storage_dir = cluster.get("shared_file_storage_dir")
271
    del cluster
272

    
273
    logging.info("Ganeti 2.7 and later only allow whitelisted directories"
274
                 " for file storage; writing existing configuration values"
275
                 " into '%s'",
276
                 options.FILE_STORAGE_PATHS_FILE)
277

    
278
    if file_storage_dir:
279
      logging.info("File storage directory: %s", file_storage_dir)
280
    if shared_file_storage_dir:
281
      logging.info("Shared file storage directory: %s",
282
                   shared_file_storage_dir)
283

    
284
    buf = StringIO()
285
    buf.write("# List automatically generated from configuration by\n")
286
    buf.write("# cfgupgrade at %s\n" % time.asctime())
287
    if file_storage_dir:
288
      buf.write("%s\n" % file_storage_dir)
289
    if shared_file_storage_dir:
290
      buf.write("%s\n" % shared_file_storage_dir)
291
    utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
292
                    data=buf.getvalue(),
293
                    mode=0600,
294
                    dry_run=options.dry_run,
295
                    backup=True)
296

    
297

    
298
def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
  """Translate a node key into its new key field.

  @param nodes_by_old_key: mapping of old key to node object
  @param old_key: the key to translate
  @param new_key_field: field of the node object holding the new key
  @return: the new key, or C{old_key} unchanged when the node is unknown

  """
  node = nodes_by_old_key.get(old_key)
  if node is None:
    logging.warning("Can't find node '%s' in configuration, assuming that it's"
                    " already up-to-date", old_key)
    return old_key
  return node[new_key_field]
304

    
305

    
306
def ChangeNodeIndices(config_data, old_key_field, new_key_field):
  """Re-key the nodes dict and translate every node reference.

  Node references in the cluster ("master_node"), in instances
  ("primary_node") and inside DRBD disk logical_ids are converted from
  C{old_key_field} to C{new_key_field}.

  """
  nodes_by_old_key = {}
  nodes_by_new_key = {}
  for node in config_data["nodes"].values():
    nodes_by_old_key[node[old_key_field]] = node
    nodes_by_new_key[node[new_key_field]] = node

  def _NewKey(old_key):
    # Wrapper to avoid repeating the lookup-table arguments everywhere.
    return GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field)

  def _FixDisk(disk):
    # Only DRBD disks keep node references in their logical_id.
    if disk["dev_type"] in constants.LDS_DRBD:
      for i in range(0, 2):
        disk["logical_id"][i] = _NewKey(disk["logical_id"][i])
    if "children" in disk:
      for child in disk["children"]:
        _FixDisk(child)

  config_data["nodes"] = nodes_by_new_key

  cluster = config_data["cluster"]
  cluster["master_node"] = _NewKey(cluster["master_node"])

  for inst in config_data["instances"].values():
    inst["primary_node"] = _NewKey(inst["primary_node"])
    for disk in inst["disks"]:
      _FixDisk(disk)
336

    
337

    
338
def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
  """Re-key the instances dict from C{old_key_field} to C{new_key_field}.

  @param config_data: full configuration dict, modified in place
  @param old_key_field: unused, kept for signature compatibility with
      L{ChangeNodeIndices} (the old keys of the dict are simply discarded)
  @param new_key_field: instance field providing the new dict keys

  """
  # The original built an insts_by_old_key dict that was never read;
  # only the new-key mapping is actually needed.
  config_data["instances"] = dict((inst[new_key_field], inst)
                                  for inst in
                                  list(config_data["instances"].values()))
346

    
347

    
348
def UpgradeNodeIndices(config_data):
  """Switch the nodes dict from name-based to uuid-based keys."""
  ChangeNodeIndices(config_data, "name", "uuid")
350

    
351

    
352
def UpgradeInstanceIndices(config_data):
  """Switch the instances dict from name-based to uuid-based keys."""
  ChangeInstanceIndices(config_data, "name", "uuid")
354

    
355

    
356
def UpgradeAll(config_data):
  """Run every upgrade step in order, bumping the version first."""
  config_data["version"] = constants.BuildVersion(TARGET_MAJOR,
                                                  TARGET_MINOR, 0)
  # Steps that only touch files on disk, not the configuration dict.
  for step in (UpgradeRapiUsers, UpgradeWatcher):
    step()
  # Steps that transform the configuration dict; order matters (e.g.
  # networks must exist before instances are upgraded).
  for step in (UpgradeFileStoragePaths, UpgradeNetworks, UpgradeCluster,
               UpgradeGroups, UpgradeInstances, UpgradeNodeIndices,
               UpgradeInstanceIndices):
    step(config_data)
368

    
369

    
370
def DowngradeDisks(disks, owner):
  """Strip disk attributes unknown to Ganeti 2.8.

  @param disks: list of disk dicts belonging to one instance
  @param owner: instance name, used only for log messages

  """
  for disk in disks:
    if "spindles" not in disk:
      continue
    # Remove spindles to downgrade to 2.8
    logging.warning("Removing spindles (value=%s) from disk %s (%s) of"
                    " instance %s",
                    disk["spindles"], disk["iv_name"], disk["uuid"], owner)
    del disk["spindles"]
378

    
379

    
380
def DowngradeInstances(config_data):
  """Apply per-instance downgrades (currently only disks).

  @raise Error: when "instances" or a per-instance "disks" key is missing

  """
  if "instances" not in config_data:
    raise Error("Cannot find the 'instances' key in the configuration!")
  for (name, inst) in config_data["instances"].items():
    if "disks" not in inst:
      raise Error("Cannot find 'disks' key for instance %s" % name)
    DowngradeDisks(inst["disks"], name)
387

    
388

    
389
def DowngradeNodeIndices(config_data):
  """Switch the nodes dict back from uuid-based to name-based keys."""
  ChangeNodeIndices(config_data, "uuid", "name")
391

    
392

    
393
def DowngradeInstanceIndices(config_data):
  """Switch the instances dict back from uuid-based to name-based keys."""
  ChangeInstanceIndices(config_data, "uuid", "name")
395

    
396

    
397
def DowngradeAll(config_data):
  """Downgrade the configuration to the previous stable version.

  Any code specific to a particular version should be labeled that way,
  so it can be removed when updating to the next version.

  """
  config_data["version"] = constants.BuildVersion(DOWNGRADE_MAJOR,
                                                  DOWNGRADE_MINOR, 0)
  DowngradeInstances(config_data)
  DowngradeNodeIndices(config_data)
  DowngradeInstanceIndices(config_data)
405

    
406

    
407
def main():
  """Main program.

  Parses the command line, then upgrades (or, with C{--downgrade},
  downgrades) the cluster configuration in the chosen data directory,
  writes it back and optionally verifies the result.

  """
  global options, args # pylint: disable=W0603

  # Option parsing
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  (options, args) = parser.parse_args()

  # We need to keep filenames locally because they might be renamed between
  # versions.
  options.data_dir = os.path.abspath(options.data_dir)
  options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
  options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
  options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
  options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
  options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
  options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
  options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
  options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
  options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
  options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
  options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
  options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
  options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"

  SetupLogging()

  # Option checking
  if args:
    raise Error("No arguments expected")
  # Downgrading forces --no-verify: the config is verified with the
  # current version's rules below, which a downgraded file need not pass.
  if options.downgrade and not options.no_verify:
    options.no_verify = True

  # Check master name
  if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
    logging.error("Aborting due to hostname mismatch")
    sys.exit(constants.EXIT_FAILURE)

  # Interactive confirmation unless --force was given
  if not options.force:
    if options.downgrade:
      usertext = ("The configuration is going to be DOWNGRADED to version %s.%s"
                  " Some configuration data might be removed if they don't fit"
                  " in the old format. Please make sure you have read the"
                  " upgrade notes (available in the UPGRADE file and included"
                  " in other documentation formats) to understand what they"
                  " are. Continue with *DOWNGRADING* the configuration?" %
                  (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
    else:
      usertext = ("Please make sure you have read the upgrade notes for"
                  " Ganeti %s (available in the UPGRADE file and included"
                  " in other documentation formats). Continue with upgrading"
                  " configuration?" % constants.RELEASE_VERSION)
    if not cli.AskUser(usertext):
      sys.exit(constants.EXIT_FAILURE)

  # Check whether it's a Ganeti configuration directory
  if not (os.path.isfile(options.CONFIG_DATA_PATH) and
          os.path.isfile(options.SERVER_PEM_PATH) and
          os.path.isfile(options.KNOWN_HOSTS_PATH)):
    raise Error(("%s does not seem to be a Ganeti configuration"
                 " directory") % options.data_dir)

  if not os.path.isdir(options.conf_dir):
    raise Error("Not a directory: %s" % options.conf_dir)

  config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))

  try:
    config_version = config_data["version"]
  except KeyError:
    raise Error("Unable to determine configuration version")

  (config_major, config_minor, config_revision) = \
    constants.SplitVersion(config_version)

  logging.info("Found configuration version %s (%d.%d.%d)",
               config_version, config_major, config_minor, config_revision)

  if "config_version" in config_data["cluster"]:
    raise Error("Inconsistent configuration: found config_version in"
                " configuration file")

  # Downgrade to the previous stable version
  if options.downgrade:
    # Downgrading is supported from the current (TARGET) version and,
    # as a no-op-ish re-run, from the downgrade target version itself.
    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
            (config_major == DOWNGRADE_MAJOR and
             config_minor == DOWNGRADE_MINOR)):
      raise Error("Downgrade supported only from the latest version (%s.%s),"
                  " found %s (%s.%s.%s) instead" %
                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                   config_minor, config_revision))
    DowngradeAll(config_data)

  # Upgrade from 2.{0..7} to 2.9
  elif config_major == 2 and config_minor in range(0, 10):
    if config_revision != 0:
      logging.warning("Config revision is %s, not 0", config_revision)
    UpgradeAll(config_data)

  elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
    logging.info("No changes necessary")

  else:
    raise Error("Configuration version %d.%d.%d not supported by this tool" %
                (config_major, config_minor, config_revision))

  try:
    logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
    utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
                    data=serializer.DumpJson(config_data),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)

    # Regenerate missing cluster certificates/keys (all "force" flags
    # False, so existing files are kept).
    if not options.dry_run:
      bootstrap.GenerateClusterCrypto(
        False, False, False, False, False,
        nodecert_file=options.SERVER_PEM_PATH,
        rapicert_file=options.RAPI_CERT_FILE,
        spicecert_file=options.SPICE_CERT_FILE,
        spicecacert_file=options.SPICE_CACERT_FILE,
        hmackey_file=options.CONFD_HMAC_KEY,
        cds_file=options.CDS_FILE)

  except Exception:
    logging.critical("Writing configuration failed. It is probably in an"
                     " inconsistent state and needs manual intervention.")
    raise

  # test loading the config file
  all_ok = True
  if not (options.dry_run or options.no_verify):
    logging.info("Testing the new config file...")
    cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
                              accept_foreign=options.ignore_hostname,
                              offline=True)
    # if we reached this, it's all fine
    vrfy = cfg.VerifyConfig()
    if vrfy:
      logging.error("Errors after conversion:")
      for item in vrfy:
        logging.error(" - %s", item)
      all_ok = False
    else:
      logging.info("File loaded successfully after upgrading")
    del cfg

  # Final status report on stderr
  if options.downgrade:
    action = "downgraded"
    out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
  else:
    action = "upgraded"
    out_ver = constants.RELEASE_VERSION
  if all_ok:
    cli.ToStderr("Configuration successfully %s to version %s.",
                 action, out_ver)
  else:
    cli.ToStderr("Configuration %s to version %s, but there are errors."
                 "\nPlease review the file.", action, out_ver)
591

    
592

    
593
# Script entry point.
if __name__ == "__main__":
  main()