#!/usr/bin/python
#

# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Tool to upgrade the configuration file.
23

    
24
This code handles only the types supported by simplejson. As an
25
example, 'set' is a 'list'.
26

    
27
"""


import os
import os.path
import sys
import optparse
import logging
import time
from cStringIO import StringIO

from ganeti import constants
from ganeti import serializer
from ganeti import utils
from ganeti import cli
from ganeti import bootstrap
from ganeti import config
from ganeti import netutils
from ganeti import pathutils


options = None
args = None


#: Target major version we will upgrade to
TARGET_MAJOR = 2
#: Target minor version we will upgrade to
TARGET_MINOR = 7
#: Target major version for downgrade
DOWNGRADE_MAJOR = 2
#: Target minor version for downgrade
DOWNGRADE_MINOR = 7


class Error(Exception):
  """Generic exception"""
  pass


def SetupLogging():
  """Configures the logging module.

  """
  formatter = logging.Formatter("%(asctime)s: %(message)s")

  stderr_handler = logging.StreamHandler()
  stderr_handler.setFormatter(formatter)
  if options.debug:
    stderr_handler.setLevel(logging.NOTSET)
  elif options.verbose:
    stderr_handler.setLevel(logging.INFO)
  else:
    stderr_handler.setLevel(logging.WARNING)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)
  root_logger.addHandler(stderr_handler)


def CheckHostname(path):
  """Ensures hostname matches ssconf value.

  @param path: Path to ssconf file

  """
  ssconf_master_node = utils.ReadOneLineFile(path)
  hostname = netutils.GetHostname().name

  if ssconf_master_node == hostname:
    return True

  logging.warning("Warning: ssconf says master node is '%s', but this"
                  " machine's name is '%s'; this tool must be run on"
                  " the master node", ssconf_master_node, hostname)
  return False


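# Fill in any parameters missing from the policy's min/max specs, taking the
# values from the corresponding specs of the given default policy.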
def _FillIPolicySpecs(default_ipolicy, ipolicy):
  if "minmax" in ipolicy:
    for (key, spec) in ipolicy["minmax"][0].items():
      for (par, val) in default_ipolicy["minmax"][0][key].items():
        if par not in spec:
          spec[par] = val


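# Convert a pre-2.7 instance policy to the new layout: top-level "min"/"max"
# specs are folded into a single-element "minmax" list, the "std" spec is
# dropped for group-level policies, and missing spec parameters are filled in
# from the default policy.  Illustrative sketch (keys abbreviated, not taken
# from a real configuration):
#   {"min": {...}, "max": {...}, "std": {...}}
# becomes
#   {"minmax": [{"min": {...}, "max": {...}}]}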
def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
  minmax_keys = ["min", "max"]
  if any((k in ipolicy) for k in minmax_keys):
    minmax = {}
    for key in minmax_keys:
      if key in ipolicy:
        if ipolicy[key]:
          minmax[key] = ipolicy[key]
        del ipolicy[key]
    if minmax:
      ipolicy["minmax"] = [minmax]
  if isgroup and "std" in ipolicy:
    del ipolicy["std"]
  _FillIPolicySpecs(default_ipolicy, ipolicy)


def UpgradeNetworks(config_data):
  networks = config_data.get("networks", None)
  if not networks:
    config_data["networks"] = {}


def UpgradeCluster(config_data):
  cluster = config_data.get("cluster", None)
  if cluster is None:
    raise Error("Cannot find cluster")
  ipolicy = cluster.setdefault("ipolicy", None)
  if ipolicy:
    UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)


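# Ensure every node group has a "networks" mapping and upgrade any
# group-level instance policy; a group policy without a cluster-level policy
# is treated as a configuration error.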
def UpgradeGroups(config_data):
  cl_ipolicy = config_data["cluster"].get("ipolicy")
  for group in config_data["nodegroups"].values():
    networks = group.get("networks", None)
    if not networks:
      group["networks"] = {}
    ipolicy = group.get("ipolicy", None)
    if ipolicy:
      if cl_ipolicy is None:
        raise Error("A group defines an instance policy but there is no"
                    " instance policy at cluster level")
      UpgradeIPolicy(ipolicy, cl_ipolicy, True)


def GetExclusiveStorageValue(config_data):
  """Return a conservative value of the exclusive_storage flag.

  Return C{True} if the cluster or at least one nodegroup has the flag set.

  """
  ret = False
  cluster = config_data["cluster"]
  ndparams = cluster.get("ndparams")
  if ndparams is not None and ndparams.get("exclusive_storage"):
    ret = True
  for group in config_data["nodegroups"].values():
    ndparams = group.get("ndparams")
    if ndparams is not None and ndparams.get("exclusive_storage"):
      ret = True
  return ret


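# Per-instance upgrades: NICs that still reference a network by name are
# rewritten to use the network's UUID, disk "iv_name" values are normalised
# to the "disk/<index>" form, and a warning is logged if disks lack the
# "spindles" parameter while exclusive storage may be in use.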
def UpgradeInstances(config_data):
  network2uuid = dict((n["name"], n["uuid"])
                      for n in config_data["networks"].values())
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")

  missing_spindles = False
  for instance, iobj in config_data["instances"].items():
    for nic in iobj["nics"]:
      name = nic.get("network", None)
      if name:
        uuid = network2uuid.get(name, None)
        if uuid:
          print("NIC with network name %s found."
                " Substituting with uuid %s." % (name, uuid))
          nic["network"] = uuid

    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    disks = iobj["disks"]
    for idx, dobj in enumerate(disks):
      expected = "disk/%s" % idx
      current = dobj.get("iv_name", "")
      if current != expected:
        logging.warning("Updating iv_name for instance %s/disk %s"
                        " from '%s' to '%s'",
                        instance, idx, current, expected)
        dobj["iv_name"] = expected
      if "spindles" not in dobj:
        missing_spindles = True

  if GetExclusiveStorageValue(config_data) and missing_spindles:
    # We cannot be sure that the instances that are missing spindles have
    # exclusive storage enabled (the check would be more complicated), so we
    # give a noncommittal message
    logging.warning("Some instance disks may need their spindles parameter"
                    " updated; you can check by running"
                    " 'gnt-cluster verify', and fix any problem with"
                    " 'gnt-cluster repair-disk-sizes'")


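# Move a pre-2.4 RAPI users file to its current location and leave a symlink
# at the old path pointing to the new file; both steps respect --dry-run.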
def UpgradeRapiUsers():
  if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
      not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
    if os.path.exists(options.RAPI_USERS_FILE):
      raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                  " already exists at %s" %
                  (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
    logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
                       mkdir=True, mkdir_mode=0750)

  # Create a symlink for RAPI users file
  if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
           os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
      os.path.isfile(options.RAPI_USERS_FILE)):
    logging.info("Creating symlink from %s to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)


def UpgradeWatcher():
  # Remove old watcher state file if it exists
  if os.path.exists(options.WATCHER_STATEFILE):
    logging.info("Removing watcher state file %s", options.WATCHER_STATEFILE)
    if not options.dry_run:
      utils.RemoveFile(options.WATCHER_STATEFILE)


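# Seed the file storage whitelist: if the whitelist file does not exist yet,
# write the cluster's configured (shared) file storage directories into it so
# that paths already in use remain allowed after the upgrade.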
def UpgradeFileStoragePaths(config_data):
  # Write file storage paths
  if not os.path.exists(options.FILE_STORAGE_PATHS_FILE):
    cluster = config_data["cluster"]
    file_storage_dir = cluster.get("file_storage_dir")
    shared_file_storage_dir = cluster.get("shared_file_storage_dir")
    del cluster

    logging.info("Ganeti 2.7 and later only allow whitelisted directories"
                 " for file storage; writing existing configuration values"
                 " into '%s'",
                 options.FILE_STORAGE_PATHS_FILE)

    if file_storage_dir:
      logging.info("File storage directory: %s", file_storage_dir)
    if shared_file_storage_dir:
      logging.info("Shared file storage directory: %s",
                   shared_file_storage_dir)

    buf = StringIO()
    buf.write("# List automatically generated from configuration by\n")
    buf.write("# cfgupgrade at %s\n" % time.asctime())
    if file_storage_dir:
      buf.write("%s\n" % file_storage_dir)
    if shared_file_storage_dir:
      buf.write("%s\n" % shared_file_storage_dir)
    utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
                    data=buf.getvalue(),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)


def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
  if old_key not in nodes_by_old_key:
    logging.warning("Can't find node '%s' in configuration, assuming that it's"
                    " already up-to-date", old_key)
    return old_key
  return nodes_by_old_key[old_key][new_key_field]


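# Re-key the node objects and every reference to them: nodes are re-indexed
# from old_key_field to new_key_field (e.g. from "name" to "uuid" on upgrade),
# and the cluster's master_node, each instance's primary_node and the node
# references inside DRBD logical_ids are rewritten to match.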
def ChangeNodeIndices(config_data, old_key_field, new_key_field):
  def ChangeDiskNodeIndices(disk):
    if disk["dev_type"] in constants.LDS_DRBD:
      for i in range(0, 2):
        disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
                                                disk["logical_id"][i],
                                                new_key_field)
    if "children" in disk:
      for child in disk["children"]:
        ChangeDiskNodeIndices(child)

  nodes_by_old_key = {}
  nodes_by_new_key = {}
  for (_, node) in config_data["nodes"].items():
    nodes_by_old_key[node[old_key_field]] = node
    nodes_by_new_key[node[new_key_field]] = node

  config_data["nodes"] = nodes_by_new_key

  cluster = config_data["cluster"]
  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           cluster["master_node"],
                                           new_key_field)

  for inst in config_data["instances"].values():
    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           inst["primary_node"],
                                           new_key_field)
    for disk in inst["disks"]:
      ChangeDiskNodeIndices(disk)


def UpgradeNodeIndices(config_data):
  ChangeNodeIndices(config_data, "name", "uuid")


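# Stamp the configuration with the target version and run all upgrade steps
# in order.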
def UpgradeAll(config_data):
  config_data["version"] = constants.BuildVersion(TARGET_MAJOR,
                                                  TARGET_MINOR, 0)
  UpgradeRapiUsers()
  UpgradeWatcher()
  UpgradeFileStoragePaths(config_data)
  UpgradeNetworks(config_data)
  UpgradeCluster(config_data)
  UpgradeGroups(config_data)
  UpgradeInstances(config_data)
  UpgradeNodeIndices(config_data)


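# Strip per-disk attributes that the downgrade target does not know about
# (currently only "spindles"), logging each removed value.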
def DowngradeDisks(disks, owner):
  for disk in disks:
    # Remove spindles to downgrade to 2.8
    if "spindles" in disk:
      logging.warning("Removing spindles (value=%s) from disk %s (%s) of"
                      " instance %s",
                      disk["spindles"], disk["iv_name"], disk["uuid"], owner)
      del disk["spindles"]


def DowngradeInstances(config_data):
  if "instances" not in config_data:
    raise Error("Cannot find the 'instances' key in the configuration!")
  for (iname, iobj) in config_data["instances"].items():
    if "disks" not in iobj:
      raise Error("Cannot find 'disks' key for instance %s" % iname)
    DowngradeDisks(iobj["disks"], iname)


def DowngradeNodeIndices(config_data):
  ChangeNodeIndices(config_data, "uuid", "name")


def DowngradeAll(config_data):
  # Any code specific to a particular version should be labeled that way, so
  # it can be removed when updating to the next version.
  DowngradeInstances(config_data)
  DowngradeNodeIndices(config_data)


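# Typical invocations (illustrative only; see the options defined in main):
#   cfgupgrade --verbose --dry-run   # preview the conversion without writing
#   cfgupgrade --verbose             # upgrade the configuration in place
#   cfgupgrade --downgrade --force   # downgrade to the previous stable version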
def main():
  """Main program.

  """
  global options, args # pylint: disable=W0603

  # Option parsing
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  (options, args) = parser.parse_args()

  # We need to keep filenames locally because they might be renamed between
  # versions.
  options.data_dir = os.path.abspath(options.data_dir)
  options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
  options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
  options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
  options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
  options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
  options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
  options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
  options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
  options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
  options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
  options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
  options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
  options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"

  SetupLogging()

  # Option checking
  if args:
    raise Error("No arguments expected")
  if options.downgrade and not options.no_verify:
    options.no_verify = True

  # Check master name
  if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
    logging.error("Aborting due to hostname mismatch")
    sys.exit(constants.EXIT_FAILURE)

  if not options.force:
    if options.downgrade:
      usertext = ("The configuration is going to be DOWNGRADED to version"
                  " %s.%s. Some configuration data might be removed if it"
                  " doesn't fit in the old format. Please make sure you have"
                  " read the upgrade notes (available in the UPGRADE file and"
                  " included in other documentation formats) to understand"
                  " what might be removed. Continue with *DOWNGRADING* the"
                  " configuration?" %
                  (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
    else:
      usertext = ("Please make sure you have read the upgrade notes for"
                  " Ganeti %s (available in the UPGRADE file and included"
                  " in other documentation formats). Continue with upgrading"
                  " configuration?" % constants.RELEASE_VERSION)
    if not cli.AskUser(usertext):
      sys.exit(constants.EXIT_FAILURE)

  # Check whether it's a Ganeti configuration directory
  if not (os.path.isfile(options.CONFIG_DATA_PATH) and
          os.path.isfile(options.SERVER_PEM_PATH) and
          os.path.isfile(options.KNOWN_HOSTS_PATH)):
    raise Error(("%s does not seem to be a Ganeti configuration"
                 " directory") % options.data_dir)

  if not os.path.isdir(options.conf_dir):
    raise Error("Not a directory: %s" % options.conf_dir)

  config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))

  try:
    config_version = config_data["version"]
  except KeyError:
    raise Error("Unable to determine configuration version")

  (config_major, config_minor, config_revision) = \
    constants.SplitVersion(config_version)

  logging.info("Found configuration version %s (%d.%d.%d)",
               config_version, config_major, config_minor, config_revision)

  if "config_version" in config_data["cluster"]:
    raise Error("Inconsistent configuration: found config_version in"
                " configuration file")

  # Downgrade to the previous stable version
  if options.downgrade:
    if config_major != TARGET_MAJOR or config_minor != TARGET_MINOR:
      raise Error("Downgrade supported only from the latest version (%s.%s),"
                  " found %s (%s.%s.%s) instead" %
                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                   config_minor, config_revision))
    DowngradeAll(config_data)

  # Upgrade from 2.{0..7} to 2.7
  elif config_major == 2 and config_minor in range(0, 8):
    if config_revision != 0:
      logging.warning("Config revision is %s, not 0", config_revision)
    UpgradeAll(config_data)

  elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
    logging.info("No changes necessary")

  else:
    raise Error("Configuration version %d.%d.%d not supported by this tool" %
                (config_major, config_minor, config_revision))

  try:
    logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
    utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
                    data=serializer.DumpJson(config_data),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)

    if not options.dry_run:
      bootstrap.GenerateClusterCrypto(
        False, False, False, False, False,
        nodecert_file=options.SERVER_PEM_PATH,
        rapicert_file=options.RAPI_CERT_FILE,
        spicecert_file=options.SPICE_CERT_FILE,
        spicecacert_file=options.SPICE_CACERT_FILE,
        hmackey_file=options.CONFD_HMAC_KEY,
        cds_file=options.CDS_FILE)

  except Exception:
    logging.critical("Writing configuration failed. It is probably in an"
                     " inconsistent state and needs manual intervention.")
    raise

  # test loading the config file
  all_ok = True
  if not (options.dry_run or options.no_verify):
    logging.info("Testing the new config file...")
    cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
                              accept_foreign=options.ignore_hostname,
                              offline=True)
    # if we reached this, it's all fine
    vrfy = cfg.VerifyConfig()
    if vrfy:
      logging.error("Errors after conversion:")
      for item in vrfy:
        logging.error(" - %s", item)
      all_ok = False
    else:
      logging.info("File loaded successfully after upgrading")
    del cfg

  if options.downgrade:
    action = "downgraded"
    out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
  else:
    action = "upgraded"
    out_ver = constants.RELEASE_VERSION
  if all_ok:
    cli.ToStderr("Configuration successfully %s to version %s.",
                 action, out_ver)
  else:
    cli.ToStderr("Configuration %s to version %s, but there are errors."
                 "\nPlease review the file.", action, out_ver)


if __name__ == "__main__":
  main()