Statistics
| Branch: | Tag: | Revision:

root / tools / cfgupgrade @ effc1b86

History | View | Annotate | Download (20 kB)

1
#!/usr/bin/python
2
#
3

    
4
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Tool to upgrade the configuration file.
23

    
24
This code handles only the types supported by simplejson. As an
25
example, 'set' is a 'list'.
26

    
27
"""
28

    
29

    
30
import os
31
import os.path
32
import sys
33
import optparse
34
import logging
35
import time
36
from cStringIO import StringIO
37

    
38
from ganeti import constants
39
from ganeti import serializer
40
from ganeti import utils
41
from ganeti import cli
42
from ganeti import bootstrap
43
from ganeti import config
44
from ganeti import netutils
45
from ganeti import pathutils
46

    
47
from ganeti.utils import version
48

    
49

    
50
options = None
51
args = None
52

    
53

    
54
#: Target major version we will upgrade to
55
TARGET_MAJOR = 2
56
#: Target minor version we will upgrade to
57
TARGET_MINOR = 10
58
#: Target major version for downgrade
59
DOWNGRADE_MAJOR = 2
60
#: Target minor version for downgrade
61
DOWNGRADE_MINOR = 9
62

    
63

    
64
class Error(Exception):
  """Base exception for all errors raised by this tool."""
67

    
68

    
69
def SetupLogging():
  """Configures the logging module.

  The stderr handler's threshold is derived from the global C{options}:
  --debug logs everything, --verbose logs INFO and up, otherwise only
  WARNING and up are shown.

  """
  if options.debug:
    level = logging.NOTSET
  elif options.verbose:
    level = logging.INFO
  else:
    level = logging.WARNING

  handler = logging.StreamHandler()
  handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
  handler.setLevel(level)

  root = logging.getLogger("")
  root.setLevel(logging.NOTSET)
  root.addHandler(handler)
87

    
88

    
89
def CheckHostname(path):
  """Ensures hostname matches ssconf value.

  @param path: Path to ssconf file
  @return: True if the current hostname equals the stored master node name

  """
  expected = utils.ReadOneLineFile(path)
  actual = netutils.GetHostname().name

  if expected != actual:
    logging.warning("Warning: ssconf says master node is '%s', but this"
                    " machine's name is '%s'; this tool must be run on"
                    " the master node", expected, actual)
    return False

  return True
105

    
106

    
107
def _FillIPolicySpecs(default_ipolicy, ipolicy):
108
  if "minmax" in ipolicy:
109
    for (key, spec) in ipolicy["minmax"][0].items():
110
      for (par, val) in default_ipolicy["minmax"][0][key].items():
111
        if par not in spec:
112
          spec[par] = val
113

    
114

    
115
def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
  """Convert an instance policy to the "minmax" layout.

  Legacy top-level "min"/"max" specs are moved into a single-element
  "minmax" list; group policies drop any "std" spec; missing spec values
  are then filled in from C{default_ipolicy}.

  """
  collected = {}
  for key in ("min", "max"):
    if key in ipolicy:
      value = ipolicy.pop(key)
      # Only keep non-empty specs; the legacy key is removed either way
      if value:
        collected[key] = value
  if collected:
    ipolicy["minmax"] = [collected]
  if isgroup and "std" in ipolicy:
    del ipolicy["std"]
  _FillIPolicySpecs(default_ipolicy, ipolicy)
129

    
130

    
131
def UpgradeNetworks(config_data):
  """Ensure the configuration carries a (possibly empty) "networks" dict."""
  if not config_data.get("networks"):
    config_data["networks"] = {}
135

    
136

    
137
def UpgradeCluster(config_data):
  """Upgrade cluster-level data, currently only the instance policy.

  @raise Error: if the configuration has no "cluster" entry

  """
  cluster = config_data.get("cluster", None)
  if cluster is None:
    raise Error("Cannot find cluster")
  # setdefault guarantees the key exists afterwards, even if empty
  if cluster.setdefault("ipolicy", None):
    UpgradeIPolicy(cluster["ipolicy"], constants.IPOLICY_DEFAULTS, False)
144

    
145

    
146
def UpgradeGroups(config_data):
  """Upgrade all node groups: default networks and convert group policies.

  @raise Error: if a group defines an ipolicy while the cluster has none

  """
  cl_ipolicy = config_data["cluster"].get("ipolicy")
  for group in config_data["nodegroups"].values():
    if not group.get("networks"):
      group["networks"] = {}
    ipolicy = group.get("ipolicy")
    if not ipolicy:
      continue
    if cl_ipolicy is None:
      raise Error("A group defines an instance policy but there is no"
                  " instance policy at cluster level")
    UpgradeIPolicy(ipolicy, cl_ipolicy, True)
158

    
159

    
160
def GetExclusiveStorageValue(config_data):
  """Return a conservative value of the exclusive_storage flag.

  Return C{True} if the cluster or at least a nodegroup have the flag set.

  """
  # Inspect the cluster first, then every nodegroup
  holders = [config_data["cluster"]]
  holders.extend(config_data["nodegroups"].values())
  for obj in holders:
    ndparams = obj.get("ndparams")
    if ndparams is not None and ndparams.get("exclusive_storage"):
      return True
  return False
176

    
177

    
178
def RemovePhysicalId(disk):
  """Strip the obsolete "physical_id" field from a disk and its children."""
  for child in disk.get("children", []):
    RemovePhysicalId(child)
  disk.pop("physical_id", None)
184

    
185

    
186
def UpgradeInstances(config_data):
  """Upgrade all instance objects in place.

  Replaces NIC network names with network UUIDs, removes the obsolete
  "physical_id" field from disks, fixes up wrong "iv_name" values, and
  warns if disk spindles might need updating.

  @raise Error: if the configuration lacks the "instances" key or an
      instance has no "disks" entry

  """
  # Map from network name to UUID, for translating legacy NIC references
  network2uuid = dict((n["name"], n["uuid"])
                      for n in config_data["networks"].values())
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")

  missing_spindles = False
  for instance, iobj in config_data["instances"].items():
    for nic in iobj["nics"]:
      name = nic.get("network", None)
      if name:
        uuid = network2uuid.get(name, None)
        if uuid:
          print("NIC with network name %s found."
                " Substituting with uuid %s." % (name, uuid))
          nic["network"] = uuid

    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    disks = iobj["disks"]
    for idx, dobj in enumerate(disks):
      RemovePhysicalId(dobj)

      # iv_name must reflect the disk's index within the instance
      expected = "disk/%s" % idx
      current = dobj.get("iv_name", "")
      if current != expected:
        logging.warning("Updating iv_name for instance %s/disk %s"
                        " from '%s' to '%s'",
                        instance, idx, current, expected)
        dobj["iv_name"] = expected
      # Fixed: idiomatic membership test ("not in" instead of "not ... in")
      if "spindles" not in dobj:
        missing_spindles = True

  if GetExclusiveStorageValue(config_data) and missing_spindles:
    # We cannot be sure that the instances that are missing spindles have
    # exclusive storage enabled (the check would be more complicated), so we
    # give a noncommittal message
    logging.warning("Some instance disks could be needing to update the"
                    " spindles parameter; you can check by running"
                    " 'gnt-cluster verify', and fix any problem with"
                    " 'gnt-cluster repair-disk-sizes'")
227

    
228

    
229
def UpgradeRapiUsers():
  """Move the pre-2.4 RAPI users file and keep a compatibility symlink.

  If the old path (options.RAPI_USERS_FILE_PRE24) is a regular file (not a
  symlink), it is renamed to the new location; afterwards, if the old path
  is unused and the new file exists, a symlink is created at the old path
  so tools using the old location keep working. Honours --dry-run.

  @raise Error: if both the old and the new users file exist

  """
  if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
      not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
    if os.path.exists(options.RAPI_USERS_FILE):
      raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                  " already exists at %s" %
                  (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
    logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
                       mkdir=True, mkdir_mode=0750)

  # Create a symlink for RAPI users file
  if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
           os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
      os.path.isfile(options.RAPI_USERS_FILE)):
    logging.info("Creating symlink from %s to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)
250

    
251

    
252
def UpgradeWatcher():
  """Delete a stale watcher state file if present, honouring --dry-run."""
  statefile = options.WATCHER_STATEFILE
  if not os.path.exists(statefile):
    return
  logging.info("Removing watcher state file %s", statefile)
  if not options.dry_run:
    utils.RemoveFile(statefile)
258

    
259

    
260
def UpgradeFileStoragePaths(config_data):
  """Generate the file-storage whitelist file from the configuration.

  Ganeti 2.7 and later only allow whitelisted directories for file
  storage; if the whitelist file does not exist yet, it is created from
  the cluster's file_storage_dir/shared_file_storage_dir values.

  """
  if os.path.exists(options.FILE_STORAGE_PATHS_FILE):
    return

  cluster = config_data["cluster"]
  file_storage_dir = cluster.get("file_storage_dir")
  shared_file_storage_dir = cluster.get("shared_file_storage_dir")
  del cluster

  logging.info("Ganeti 2.7 and later only allow whitelisted directories"
               " for file storage; writing existing configuration values"
               " into '%s'",
               options.FILE_STORAGE_PATHS_FILE)

  if file_storage_dir:
    logging.info("File storage directory: %s", file_storage_dir)
  if shared_file_storage_dir:
    logging.info("Shared file storage directory: %s",
                 shared_file_storage_dir)

  # Assemble the file contents as a list of lines instead of a StringIO
  lines = [
    "# List automatically generated from configuration by\n",
    "# cfgupgrade at %s\n" % time.asctime(),
    ]
  if file_storage_dir:
    lines.append("%s\n" % file_storage_dir)
  if shared_file_storage_dir:
    lines.append("%s\n" % shared_file_storage_dir)
  utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
                  data="".join(lines),
                  mode=0o600,
                  dry_run=options.dry_run,
                  backup=True)
291

    
292

    
293
def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
  """Translate a node's old key into the value of its new key field.

  Keys not found in C{nodes_by_old_key} are assumed to be already
  converted and are returned unchanged (with a warning).

  """
  try:
    node = nodes_by_old_key[old_key]
  except KeyError:
    logging.warning("Can't find node '%s' in configuration, assuming that it's"
                    " already up-to-date", old_key)
    return old_key
  return node[new_key_field]
299

    
300

    
301
def ChangeNodeIndices(config_data, old_key_field, new_key_field):
  """Re-key the nodes dict and rewrite all node references.

  Nodes become indexed by C{new_key_field}; the cluster's master node,
  every instance's primary node, and the node references embedded in
  DRBD disk logical IDs are translated accordingly.

  """
  nodes_by_old_key = {}
  nodes_by_new_key = {}
  for node in config_data["nodes"].values():
    nodes_by_old_key[node[old_key_field]] = node
    nodes_by_new_key[node[new_key_field]] = node

  def _FixDisk(disk):
    # DRBD logical IDs carry the two node identifiers in slots 0 and 1
    if disk["dev_type"] in constants.LDS_DRBD:
      for i in (0, 1):
        disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
                                                disk["logical_id"][i],
                                                new_key_field)
    for child in disk.get("children", []):
      _FixDisk(child)

  config_data["nodes"] = nodes_by_new_key

  cluster = config_data["cluster"]
  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           cluster["master_node"],
                                           new_key_field)

  for inst in config_data["instances"].values():
    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           inst["primary_node"],
                                           new_key_field)
    for disk in inst["disks"]:
      _FixDisk(disk)
331

    
332

    
333
def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
  """Re-key the instances dict from old_key_field to new_key_field.

  @param config_data: full configuration dict; its "instances" entry is
      replaced by a dict indexed by each instance's C{new_key_field}

  """
  # Fixed: the original also built an "insts_by_old_key" map that was
  # never used; only the new-key index is needed.
  insts_by_new_key = {}
  for inst in config_data["instances"].values():
    insts_by_new_key[inst[new_key_field]] = inst

  config_data["instances"] = insts_by_new_key
341

    
342

    
343
def UpgradeNodeIndices(config_data):
  """Switch the nodes dict (and all node references) from name to UUID keys."""
  ChangeNodeIndices(config_data, "name", "uuid")
345

    
346

    
347
def UpgradeInstanceIndices(config_data):
  """Switch the instances dict from name to UUID keys."""
  ChangeInstanceIndices(config_data, "name", "uuid")
349

    
350

    
351
def UpgradeAll(config_data):
  """Run all upgrade steps, stamping the TARGET_* version on the result.

  The order matters: e.g. UpgradeNetworks must run before
  UpgradeInstances, which reads config_data["networks"] to translate
  NIC network names into UUIDs.

  """
  config_data["version"] = version.BuildVersion(TARGET_MAJOR, TARGET_MINOR, 0)
  UpgradeRapiUsers()
  UpgradeWatcher()
  UpgradeFileStoragePaths(config_data)
  UpgradeNetworks(config_data)
  UpgradeCluster(config_data)
  UpgradeGroups(config_data)
  UpgradeInstances(config_data)
  UpgradeNodeIndices(config_data)
  UpgradeInstanceIndices(config_data)
362

    
363

    
364
def DowngradeInstances(config_data):
  """Downgrade every instance, stripping NIC data the old format lacks.

  @raise Error: if the configuration has no "instances" key

  """
  if "instances" not in config_data:
    raise Error("Cannot find the 'instances' key in the configuration!")
  for iname, iobj in config_data["instances"].items():
    DowngradeNicParamsVLAN(iobj["nics"], iname)
369

    
370

    
371
def DowngradeNicParamsVLAN(nics, owner):
  """Remove the "vlan" nicparam (not supported downstream) from NICs.

  @param nics: list of NIC dicts to clean up
  @param owner: instance name, used only in the warning message

  """
  for nic in nics:
    vlan = nic["nicparams"].get("vlan", None)
    if not vlan:
      continue
    logging.warning("Instance with name %s found. Removing VLAN information"
                    " %s.", owner, vlan)
    del nic["nicparams"]["vlan"]
378

    
379

    
380
def DowngradeAll(config_data):
  """Run all downgrade steps, stamping the DOWNGRADE_* version."""
  # Any code specific to a particular version should be labeled that way, so
  # it can be removed when updating to the next version.
  config_data["version"] = version.BuildVersion(DOWNGRADE_MAJOR,
                                                DOWNGRADE_MINOR, 0)
  DowngradeInstances(config_data)
386

    
387

    
388
def main():
  """Main program.

  Parses command-line options, sanity-checks the environment, loads the
  configuration, upgrades (or downgrades) it, writes it back together
  with any missing cluster crypto material, and re-verifies the result.

  """
  global options, args # pylint: disable=W0603

  # Option parsing
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  (options, args) = parser.parse_args()

  # We need to keep filenames locally because they might be renamed between
  # versions.
  options.data_dir = os.path.abspath(options.data_dir)
  options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
  options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
  options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
  options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
  options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
  options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
  options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
  options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
  options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
  options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
  options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
  options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
  options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"

  SetupLogging()

  # Option checking
  if args:
    raise Error("No arguments expected")
  # Downgrading implies --no-verify (presumably the downgraded format
  # cannot be verified by this version's code — confirm)
  if options.downgrade and not options.no_verify:
    options.no_verify = True

  # Check master name
  if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
    logging.error("Aborting due to hostname mismatch")
    sys.exit(constants.EXIT_FAILURE)

  # Ask for confirmation unless --force was given
  if not options.force:
    if options.downgrade:
      usertext = ("The configuration is going to be DOWNGRADED to version %s.%s"
                  " Some configuration data might be removed if they don't fit"
                  " in the old format. Please make sure you have read the"
                  " upgrade notes (available in the UPGRADE file and included"
                  " in other documentation formats) to understand what they"
                  " are. Continue with *DOWNGRADING* the configuration?" %
                  (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
    else:
      usertext = ("Please make sure you have read the upgrade notes for"
                  " Ganeti %s (available in the UPGRADE file and included"
                  " in other documentation formats). Continue with upgrading"
                  " configuration?" % constants.RELEASE_VERSION)
    if not cli.AskUser(usertext):
      sys.exit(constants.EXIT_FAILURE)

  # Check whether it's a Ganeti configuration directory
  if not (os.path.isfile(options.CONFIG_DATA_PATH) and
          os.path.isfile(options.SERVER_PEM_PATH) and
          os.path.isfile(options.KNOWN_HOSTS_PATH)):
    raise Error(("%s does not seem to be a Ganeti configuration"
                 " directory") % options.data_dir)

  if not os.path.isdir(options.conf_dir):
    raise Error("Not a directory: %s" % options.conf_dir)

  config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))

  try:
    config_version = config_data["version"]
  except KeyError:
    raise Error("Unable to determine configuration version")

  (config_major, config_minor, config_revision) = \
    version.SplitVersion(config_version)

  logging.info("Found configuration version %s (%d.%d.%d)",
               config_version, config_major, config_minor, config_revision)

  if "config_version" in config_data["cluster"]:
    raise Error("Inconsistent configuration: found config_version in"
                " configuration file")

  # Downgrade to the previous stable version
  if options.downgrade:
    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
            (config_major == DOWNGRADE_MAJOR and
             config_minor == DOWNGRADE_MINOR)):
      raise Error("Downgrade supported only from the latest version (%s.%s),"
                  " found %s (%s.%s.%s) instead" %
                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                   config_minor, config_revision))
    DowngradeAll(config_data)

  # Upgrade from 2.{0..9} to 2.10 (minor versions 0-9 are accepted below)
  elif config_major == 2 and config_minor in range(0, 10):
    if config_revision != 0:
      logging.warning("Config revision is %s, not 0", config_revision)
    UpgradeAll(config_data)

  elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
    logging.info("No changes necessary")

  else:
    raise Error("Configuration version %d.%d.%d not supported by this tool" %
                (config_major, config_minor, config_revision))

  try:
    logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
    utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
                    data=serializer.DumpJson(config_data),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)

    if not options.dry_run:
      # NOTE(review): the five positional False flags presumably disable
      # forced regeneration of each credential — confirm against the
      # bootstrap.GenerateClusterCrypto signature
      bootstrap.GenerateClusterCrypto(
        False, False, False, False, False,
        nodecert_file=options.SERVER_PEM_PATH,
        rapicert_file=options.RAPI_CERT_FILE,
        spicecert_file=options.SPICE_CERT_FILE,
        spicecacert_file=options.SPICE_CACERT_FILE,
        hmackey_file=options.CONFD_HMAC_KEY,
        cds_file=options.CDS_FILE)

  except Exception:
    logging.critical("Writing configuration failed. It is probably in an"
                     " inconsistent state and needs manual intervention.")
    raise

  # test loading the config file
  all_ok = True
  if not (options.dry_run or options.no_verify):
    logging.info("Testing the new config file...")
    cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
                              accept_foreign=options.ignore_hostname,
                              offline=True)
    # if we reached this, it's all fine
    vrfy = cfg.VerifyConfig()
    if vrfy:
      logging.error("Errors after conversion:")
      for item in vrfy:
        logging.error(" - %s", item)
      all_ok = False
    else:
      logging.info("File loaded successfully after upgrading")
    del cfg

  if options.downgrade:
    action = "downgraded"
    out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
  else:
    action = "upgraded"
    out_ver = constants.RELEASE_VERSION
  if all_ok:
    cli.ToStderr("Configuration successfully %s to version %s.",
                 action, out_ver)
  else:
    cli.ToStderr("Configuration %s to version %s, but there are errors."
                 "\nPlease review the file.", action, out_ver)
572

    
573

    
574
# Script entry point
if __name__ == "__main__":
  main()