root / tools / cfgupgrade @ 5275a77f

#!/usr/bin/python
#

# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Tool to upgrade the configuration file.

This code handles only the types supported by simplejson. For
example, a 'set' is represented as a 'list'.

"""


import os
import os.path
import sys
import optparse
import logging
import time
from cStringIO import StringIO

from ganeti import constants
from ganeti import serializer
from ganeti import utils
from ganeti import cli
from ganeti import bootstrap
from ganeti import config
from ganeti import netutils
from ganeti import pathutils


options = None
args = None


#: Target major version we will upgrade to
TARGET_MAJOR = 2
#: Target minor version we will upgrade to
TARGET_MINOR = 10
#: Target major version for downgrade
DOWNGRADE_MAJOR = 2
#: Target minor version for downgrade
DOWNGRADE_MINOR = 9


class Error(Exception):
  """Generic exception"""
  pass


def SetupLogging():
  """Configures the logging module.

  """
  formatter = logging.Formatter("%(asctime)s: %(message)s")

  stderr_handler = logging.StreamHandler()
  stderr_handler.setFormatter(formatter)
  if options.debug:
    stderr_handler.setLevel(logging.NOTSET)
  elif options.verbose:
    stderr_handler.setLevel(logging.INFO)
  else:
    stderr_handler.setLevel(logging.WARNING)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)
  root_logger.addHandler(stderr_handler)


def CheckHostname(path):
  """Ensures hostname matches ssconf value.

  @param path: Path to ssconf file

  """
  ssconf_master_node = utils.ReadOneLineFile(path)
  hostname = netutils.GetHostname().name

  if ssconf_master_node == hostname:
    return True

  logging.warning("Warning: ssconf says master node is '%s', but this"
100
                  " machine's name is '%s'; this tool must be run on"
101
                  " the master node", ssconf_master_node, hostname)
102
  return False
103

    
104

    
105
def _FillIPolicySpecs(default_ipolicy, ipolicy):
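  """Fills in missing parameters in an instance policy's minmax specs.

  Every parameter that is missing from the first "minmax" entry of
  C{ipolicy} is copied from the corresponding entry of
  C{default_ipolicy}.

  """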
  if "minmax" in ipolicy:
    for (key, spec) in ipolicy["minmax"][0].items():
      for (par, val) in default_ipolicy["minmax"][0][key].items():
        if par not in spec:
          spec[par] = val


def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
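  """Converts an instance policy to the newer "minmax" representation.

  Top-level "min"/"max" keys are folded into a one-element "minmax"
  list, "std" is dropped for group policies (C{isgroup} set), and
  missing spec parameters are filled in from C{default_ipolicy}.

  """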
  minmax_keys = ["min", "max"]
  if any((k in ipolicy) for k in minmax_keys):
    minmax = {}
    for key in minmax_keys:
      if key in ipolicy:
        if ipolicy[key]:
          minmax[key] = ipolicy[key]
        del ipolicy[key]
    if minmax:
      ipolicy["minmax"] = [minmax]
  if isgroup and "std" in ipolicy:
    del ipolicy["std"]
  _FillIPolicySpecs(default_ipolicy, ipolicy)


def UpgradeNetworks(config_data):
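  """Ensures the configuration has a (possibly empty) "networks" dict.

  """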
  networks = config_data.get("networks", None)
  if not networks:
    config_data["networks"] = {}


def UpgradeCluster(config_data):
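  """Upgrades the cluster-level instance policy, if one is set.

  """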
  cluster = config_data.get("cluster", None)
  if cluster is None:
    raise Error("Cannot find cluster")
  ipolicy = cluster.setdefault("ipolicy", None)
  if ipolicy:
    UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)


def UpgradeGroups(config_data):
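  """Upgrades all node groups.

  Ensures every group has a "networks" dict and converts any group-level
  instance policy, filling missing values from the cluster policy.

  """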
  cl_ipolicy = config_data["cluster"].get("ipolicy")
  for group in config_data["nodegroups"].values():
    networks = group.get("networks", None)
    if not networks:
      group["networks"] = {}
    ipolicy = group.get("ipolicy", None)
    if ipolicy:
      if cl_ipolicy is None:
        raise Error("A group defines an instance policy but there is no"
                    " instance policy at cluster level")
      UpgradeIPolicy(ipolicy, cl_ipolicy, True)


def GetExclusiveStorageValue(config_data):
  """Return a conservative value of the exclusive_storage flag.

  Return C{True} if the cluster or at least one nodegroup has the flag set.

  """
  ret = False
  cluster = config_data["cluster"]
  ndparams = cluster.get("ndparams")
  if ndparams is not None and ndparams.get("exclusive_storage"):
    ret = True
  for group in config_data["nodegroups"].values():
    ndparams = group.get("ndparams")
    if ndparams is not None and ndparams.get("exclusive_storage"):
      ret = True
  return ret


def RemovePhysicalId(disk):
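  """Removes the "physical_id" field from a disk and its children.

  """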
  if "children" in disk:
    for d in disk["children"]:
      RemovePhysicalId(d)
  if "physical_id" in disk:
    del disk["physical_id"]


def UpgradeInstances(config_data):
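  """Upgrades the instance objects.

  Replaces NIC network names with network UUIDs, removes the
  "physical_id" field from disks, fixes wrong "iv_name" values and warns
  if any disk may need its "spindles" parameter set.

  """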
  network2uuid = dict((n["name"], n["uuid"])
                      for n in config_data["networks"].values())
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")

  missing_spindles = False
  for instance, iobj in config_data["instances"].items():
    for nic in iobj["nics"]:
      name = nic.get("network", None)
      if name:
        uuid = network2uuid.get(name, None)
        if uuid:
          print("NIC with network name %s found."
                " Substituting with uuid %s." % (name, uuid))
          nic["network"] = uuid

    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    disks = iobj["disks"]
    for idx, dobj in enumerate(disks):
      RemovePhysicalId(dobj)

      expected = "disk/%s" % idx
      current = dobj.get("iv_name", "")
      if current != expected:
        logging.warning("Updating iv_name for instance %s/disk %s"
                        " from '%s' to '%s'",
                        instance, idx, current, expected)
        dobj["iv_name"] = expected
      if not "spindles" in dobj:
215
        missing_spindles = True
216

    
217
  if GetExclusiveStorageValue(config_data) and missing_spindles:
218
    # We cannot be sure that the instances that are missing spindles have
219
    # exclusive storage enabled (the check would be more complicated), so we
220
    # give a noncommittal message
221
    logging.warning("Some instance disks could be needing to update the"
222
                    " spindles parameter; you can check by running"
223
                    " 'gnt-cluster verify', and fix any problem with"
224
                    " 'gnt-cluster repair-disk-sizes'")
225

    
226

    
227
def UpgradeRapiUsers():
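  """Moves the pre-2.4 RAPI users file to its current location.

  The old file is renamed (unless a file already exists at the new
  location) and a compatibility symlink is created at the old path.

  """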
  if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
      not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
    if os.path.exists(options.RAPI_USERS_FILE):
      raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                  " already exists at %s" %
                  (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
    logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
                       mkdir=True, mkdir_mode=0750)

  # Create a symlink for RAPI users file
  if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
           os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
      os.path.isfile(options.RAPI_USERS_FILE)):
    logging.info("Creating symlink from %s to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)


def UpgradeWatcher():
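  """Removes the old watcher state file, if present.

  """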
  # Remove old watcher state file if it exists
  if os.path.exists(options.WATCHER_STATEFILE):
    logging.info("Removing watcher state file %s", options.WATCHER_STATEFILE)
    if not options.dry_run:
      utils.RemoveFile(options.WATCHER_STATEFILE)


def UpgradeFileStoragePaths(config_data):
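  """Writes the file storage paths whitelist, unless it already exists.

  The file storage directories from the cluster configuration are
  written into the whitelist file used by Ganeti 2.7 and later.

  """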
  # Write file storage paths
  if not os.path.exists(options.FILE_STORAGE_PATHS_FILE):
    cluster = config_data["cluster"]
    file_storage_dir = cluster.get("file_storage_dir")
    shared_file_storage_dir = cluster.get("shared_file_storage_dir")
    del cluster

    logging.info("Ganeti 2.7 and later only allow whitelisted directories"
                 " for file storage; writing existing configuration values"
                 " into '%s'",
                 options.FILE_STORAGE_PATHS_FILE)

    if file_storage_dir:
      logging.info("File storage directory: %s", file_storage_dir)
    if shared_file_storage_dir:
      logging.info("Shared file storage directory: %s",
                   shared_file_storage_dir)

    buf = StringIO()
    buf.write("# List automatically generated from configuration by\n")
    buf.write("# cfgupgrade at %s\n" % time.asctime())
    if file_storage_dir:
      buf.write("%s\n" % file_storage_dir)
    if shared_file_storage_dir:
      buf.write("%s\n" % shared_file_storage_dir)
    utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
                    data=buf.getvalue(),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)


def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
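  """Returns a node's new index (e.g. its UUID) given its old one.

  If the node cannot be found, the old key is returned unchanged,
  assuming it is already up to date.

  """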
  if old_key not in nodes_by_old_key:
    logging.warning("Can't find node '%s' in configuration, assuming that it's"
                    " already up-to-date", old_key)
    return old_key
  return nodes_by_old_key[old_key][new_key_field]


def ChangeNodeIndices(config_data, old_key_field, new_key_field):
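  """Re-indexes the nodes and all node references in the configuration.

  The node dict is re-keyed by C{new_key_field} (e.g. "uuid"), and the
  master node, instance primary nodes and DRBD disk logical IDs are
  updated to use the new keys.

  """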
  def ChangeDiskNodeIndices(disk):
    if disk["dev_type"] in constants.LDS_DRBD:
      for i in range(0, 2):
        disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
                                                disk["logical_id"][i],
                                                new_key_field)
    if "children" in disk:
      for child in disk["children"]:
        ChangeDiskNodeIndices(child)

  nodes_by_old_key = {}
  nodes_by_new_key = {}
  for (_, node) in config_data["nodes"].items():
    nodes_by_old_key[node[old_key_field]] = node
    nodes_by_new_key[node[new_key_field]] = node

  config_data["nodes"] = nodes_by_new_key

  cluster = config_data["cluster"]
  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           cluster["master_node"],
                                           new_key_field)

  for inst in config_data["instances"].values():
    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           inst["primary_node"],
                                           new_key_field)
    for disk in inst["disks"]:
      ChangeDiskNodeIndices(disk)


def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
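  """Re-indexes the instance dict by C{new_key_field} (e.g. "uuid").

  """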
  insts_by_old_key = {}
  insts_by_new_key = {}
  for (_, inst) in config_data["instances"].items():
    insts_by_old_key[inst[old_key_field]] = inst
    insts_by_new_key[inst[new_key_field]] = inst

  config_data["instances"] = insts_by_new_key


def UpgradeNodeIndices(config_data):
  ChangeNodeIndices(config_data, "name", "uuid")


def UpgradeInstanceIndices(config_data):
  ChangeInstanceIndices(config_data, "name", "uuid")


def UpgradeAll(config_data):
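  """Runs all upgrade steps and sets the new configuration version.

  """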
  config_data["version"] = constants.BuildVersion(TARGET_MAJOR,
                                                  TARGET_MINOR, 0)
  UpgradeRapiUsers()
  UpgradeWatcher()
  UpgradeFileStoragePaths(config_data)
  UpgradeNetworks(config_data)
  UpgradeCluster(config_data)
  UpgradeGroups(config_data)
  UpgradeInstances(config_data)
  UpgradeNodeIndices(config_data)
  UpgradeInstanceIndices(config_data)


def DowngradeInstances(config_data):
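  """Downgrades the instance objects, dropping the VLAN NIC parameter.

  """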
  if "instances" not in config_data:
    raise Error("Cannot find the 'instances' key in the configuration!")
  for (iname, iobj) in config_data["instances"].items():
    DowngradeNicParamsVLAN(iobj["nics"], iname)


def DowngradeNicParamsVLAN(nics, owner):
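  """Removes the "vlan" entry from the nicparams of the given NICs.

  @param owner: name of the owning instance, used for logging

  """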
  for nic in nics:
    vlan = nic["nicparams"].get("vlan", None)
    if vlan:
      logging.warning("Instance %s: removing VLAN information %s",
                      owner, vlan)
      del nic["nicparams"]["vlan"]


def DowngradeAll(config_data):
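  """Runs all downgrade steps and sets the downgraded version number.

  """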
  # Any code specific to a particular version should be labeled that way, so
  # it can be removed when updating to the next version.
  config_data["version"] = constants.BuildVersion(DOWNGRADE_MAJOR,
                                                  DOWNGRADE_MINOR, 0)
  DowngradeInstances(config_data)


def main():
  """Main program.

  """
  global options, args # pylint: disable=W0603

  # Option parsing
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  (options, args) = parser.parse_args()

  # We need to keep filenames locally because they might be renamed between
  # versions.
  options.data_dir = os.path.abspath(options.data_dir)
  options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
  options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
  options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
  options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
  options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
  options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
  options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
  options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
  options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
  options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
  options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
  options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
  options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"

  SetupLogging()

  # Option checking
  if args:
    raise Error("No arguments expected")
  if options.downgrade and not options.no_verify:
    options.no_verify = True

  # Check master name
  if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
    logging.error("Aborting due to hostname mismatch")
    sys.exit(constants.EXIT_FAILURE)

  if not options.force:
    if options.downgrade:
      usertext = ("The configuration is going to be DOWNGRADED to version %s.%s"
453
                  " Some configuration data might be removed if they don't fit"
454
                  " in the old format. Please make sure you have read the"
455
                  " upgrade notes (available in the UPGRADE file and included"
456
                  " in other documentation formats) to understand what they"
457
                  " are. Continue with *DOWNGRADING* the configuration?" %
458
                  (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
    else:
      usertext = ("Please make sure you have read the upgrade notes for"
                  " Ganeti %s (available in the UPGRADE file and included"
                  " in other documentation formats). Continue with upgrading"
                  " configuration?" % constants.RELEASE_VERSION)
    if not cli.AskUser(usertext):
      sys.exit(constants.EXIT_FAILURE)

  # Check whether it's a Ganeti configuration directory
  if not (os.path.isfile(options.CONFIG_DATA_PATH) and
          os.path.isfile(options.SERVER_PEM_PATH) and
          os.path.isfile(options.KNOWN_HOSTS_PATH)):
    raise Error(("%s does not seem to be a Ganeti configuration"
                 " directory") % options.data_dir)

  if not os.path.isdir(options.conf_dir):
    raise Error("Not a directory: %s" % options.conf_dir)

  config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))

  try:
    config_version = config_data["version"]
  except KeyError:
    raise Error("Unable to determine configuration version")

  (config_major, config_minor, config_revision) = \
    constants.SplitVersion(config_version)

  logging.info("Found configuration version %s (%d.%d.%d)",
               config_version, config_major, config_minor, config_revision)

  if "config_version" in config_data["cluster"]:
    raise Error("Inconsistent configuration: found config_version in"
                " configuration file")

  # Downgrade to the previous stable version
  if options.downgrade:
    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
            (config_major == DOWNGRADE_MAJOR and
             config_minor == DOWNGRADE_MINOR)):
      raise Error("Downgrade supported only from the latest version (%s.%s),"
                  " found %s (%s.%s.%s) instead" %
                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                   config_minor, config_revision))
    DowngradeAll(config_data)

  # Upgrade from 2.{0..9} to 2.10
  elif config_major == 2 and config_minor in range(0, 10):
    if config_revision != 0:
      logging.warning("Config revision is %s, not 0", config_revision)
    UpgradeAll(config_data)

  elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
    logging.info("No changes necessary")

  else:
    raise Error("Configuration version %d.%d.%d not supported by this tool" %
                (config_major, config_minor, config_revision))

  try:
    logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
    utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
                    data=serializer.DumpJson(config_data),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)

    if not options.dry_run:
      bootstrap.GenerateClusterCrypto(
        False, False, False, False, False,
        nodecert_file=options.SERVER_PEM_PATH,
        rapicert_file=options.RAPI_CERT_FILE,
        spicecert_file=options.SPICE_CERT_FILE,
        spicecacert_file=options.SPICE_CACERT_FILE,
        hmackey_file=options.CONFD_HMAC_KEY,
        cds_file=options.CDS_FILE)

  except Exception:
    logging.critical("Writing configuration failed. It is probably in an"
                     " inconsistent state and needs manual intervention.")
    raise

  # test loading the config file
  all_ok = True
  if not (options.dry_run or options.no_verify):
    logging.info("Testing the new config file...")
    cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
                              accept_foreign=options.ignore_hostname,
                              offline=True)
    # if we reached this, it's all fine
    vrfy = cfg.VerifyConfig()
    if vrfy:
      logging.error("Errors after conversion:")
      for item in vrfy:
        logging.error(" - %s", item)
      all_ok = False
    else:
      logging.info("File loaded successfully after upgrading")
    del cfg

  if options.downgrade:
    action = "downgraded"
    out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
  else:
    action = "upgraded"
    out_ver = constants.RELEASE_VERSION
  if all_ok:
    cli.ToStderr("Configuration successfully %s to version %s.",
                 action, out_ver)
  else:
    cli.ToStderr("Configuration %s to version %s, but there are errors."
                 "\nPlease review the file.", action, out_ver)


if __name__ == "__main__":
  main()