#!/usr/bin/python
#

# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.

"""Tool to upgrade the configuration file.

This code handles only the types supported by simplejson; for
example, a 'set' is serialized as a 'list'.

"""

import os
import os.path
import sys
import optparse
import logging
import time
from cStringIO import StringIO

from ganeti import constants
from ganeti import serializer
from ganeti import utils
from ganeti import cli
from ganeti import bootstrap
from ganeti import config
from ganeti import netutils
from ganeti import pathutils


options = None
args = None


#: Target major version we will upgrade to
TARGET_MAJOR = 2
#: Target minor version we will upgrade to
TARGET_MINOR = 7
#: Target major version for downgrade
DOWNGRADE_MAJOR = 2
#: Target minor version for downgrade
DOWNGRADE_MINOR = 7


class Error(Exception):
  """Generic exception"""
  pass

def SetupLogging():
  """Configures the logging module.

  """
  formatter = logging.Formatter("%(asctime)s: %(message)s")

  stderr_handler = logging.StreamHandler()
  stderr_handler.setFormatter(formatter)
  if options.debug:
    stderr_handler.setLevel(logging.NOTSET)
  elif options.verbose:
    stderr_handler.setLevel(logging.INFO)
  else:
    stderr_handler.setLevel(logging.WARNING)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)
  root_logger.addHandler(stderr_handler)


def CheckHostname(path):
  """Ensures hostname matches ssconf value.

  @param path: Path to ssconf file

  """
  ssconf_master_node = utils.ReadOneLineFile(path)
  hostname = netutils.GetHostname().name

  if ssconf_master_node == hostname:
    return True

  logging.warning("Warning: ssconf says master node is '%s', but this"
                  " machine's name is '%s'; this tool must be run on"
                  " the master node", ssconf_master_node, hostname)
  return False


def _FillIPolicySpecs(default_ipolicy, ipolicy):
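  """Fills missing instance spec parameters from the default policy.

  Every parameter present in the first "minmax" entry of
  C{default_ipolicy} but missing from the corresponding spec in
  C{ipolicy} is copied over.

  """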
  if "minmax" in ipolicy:
    for (key, spec) in ipolicy["minmax"][0].items():
      for (par, val) in default_ipolicy["minmax"][0][key].items():
        if par not in spec:
          spec[par] = val


def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
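  """Converts an instance policy to the 2.7 format.

  Top-level "min"/"max" specs are moved into a single-element "minmax"
  list; group policies additionally drop the "std" spec, which is only
  meaningful at cluster level. Missing spec parameters are then filled
  in from C{default_ipolicy}.

  """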
  minmax_keys = ["min", "max"]
  if any((k in ipolicy) for k in minmax_keys):
    minmax = {}
    for key in minmax_keys:
      if key in ipolicy:
        if ipolicy[key]:
          minmax[key] = ipolicy[key]
        del ipolicy[key]
    if minmax:
      ipolicy["minmax"] = [minmax]
  if isgroup and "std" in ipolicy:
    del ipolicy["std"]
  _FillIPolicySpecs(default_ipolicy, ipolicy)


def UpgradeNetworks(config_data):
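  """Ensures the configuration contains a (possibly empty) 'networks' entry.

  """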
  networks = config_data.get("networks", None)
  if not networks:
    config_data["networks"] = {}


def UpgradeCluster(config_data):
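  """Upgrades cluster-level data; currently only the instance policy.

  """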
  cluster = config_data.get("cluster", None)
  if cluster is None:
    raise Error("Cannot find cluster")
  ipolicy = cluster.setdefault("ipolicy", None)
  if ipolicy:
    UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)


def UpgradeGroups(config_data):
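  """Upgrades all node groups.

  Missing 'networks' dicts are added, and group-level instance
  policies are converted using the cluster policy as the default.

  """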
  cl_ipolicy = config_data["cluster"].get("ipolicy")
  for group in config_data["nodegroups"].values():
    networks = group.get("networks", None)
    if not networks:
      group["networks"] = {}
    ipolicy = group.get("ipolicy", None)
    if ipolicy:
      if cl_ipolicy is None:
        raise Error("A group defines an instance policy but there is no"
                    " instance policy at cluster level")
      UpgradeIPolicy(ipolicy, cl_ipolicy, True)


def UpgradeInstances(config_data):
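  """Upgrades all instances.

  NIC network names are replaced by the corresponding network UUIDs,
  and disk 'iv_name' fields are reset to the canonical 'disk/N' form.

  """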
  network2uuid = dict((n["name"], n["uuid"])
                      for n in config_data["networks"].values())
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")

  for instance, iobj in config_data["instances"].items():
    for nic in iobj["nics"]:
      name = nic.get("network", None)
      if name:
        uuid = network2uuid.get(name, None)
        if uuid:
          print("NIC with network name %s found."
                " Substituting with uuid %s." % (name, uuid))
          nic["network"] = uuid

    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    disks = iobj["disks"]
    for idx, dobj in enumerate(disks):
      expected = "disk/%s" % idx
      current = dobj.get("iv_name", "")
      if current != expected:
        logging.warning("Updating iv_name for instance %s/disk %s"
                        " from '%s' to '%s'",
                        instance, idx, current, expected)
        dobj["iv_name"] = expected


def UpgradeRapiUsers():
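  """Moves the pre-2.4 RAPI users file to its current location.

  An existing old-style file is renamed, and a compatibility symlink
  is created at the old path. Both steps honour --dry-run.

  """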
  if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
      not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
    if os.path.exists(options.RAPI_USERS_FILE):
      raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                  " already exists at %s" %
                  (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
    logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
                       mkdir=True, mkdir_mode=0750)

  # Create a symlink for RAPI users file
  if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
           os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
      os.path.isfile(options.RAPI_USERS_FILE)):
    logging.info("Creating symlink from %s to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)


def UpgradeWatcher():
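  """Removes the watcher state file, if present.

  """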
  # Remove old watcher state file if it exists
  if os.path.exists(options.WATCHER_STATEFILE):
    logging.info("Removing watcher state file %s", options.WATCHER_STATEFILE)
    if not options.dry_run:
      utils.RemoveFile(options.WATCHER_STATEFILE)


def UpgradeFileStoragePaths(config_data):
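  """Writes the file storage path whitelist introduced in Ganeti 2.7.

  The (shared) file storage directories from the cluster configuration
  are written to the whitelist file, unless that file already exists.

  """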
  # Write file storage paths
  if not os.path.exists(options.FILE_STORAGE_PATHS_FILE):
    cluster = config_data["cluster"]
    file_storage_dir = cluster.get("file_storage_dir")
    shared_file_storage_dir = cluster.get("shared_file_storage_dir")
    del cluster

    logging.info("Ganeti 2.7 and later only allow whitelisted directories"
                 " for file storage; writing existing configuration values"
                 " into '%s'",
                 options.FILE_STORAGE_PATHS_FILE)

    if file_storage_dir:
      logging.info("File storage directory: %s", file_storage_dir)
    if shared_file_storage_dir:
      logging.info("Shared file storage directory: %s",
                   shared_file_storage_dir)

    buf = StringIO()
    buf.write("# List automatically generated from configuration by\n")
    buf.write("# cfgupgrade at %s\n" % time.asctime())
    if file_storage_dir:
      buf.write("%s\n" % file_storage_dir)
    if shared_file_storage_dir:
      buf.write("%s\n" % shared_file_storage_dir)
    utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
                    data=buf.getvalue(),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)


def UpgradeAll(config_data):
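  """Runs all upgrade steps and bumps the configuration version.

  """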
  config_data["version"] = constants.BuildVersion(TARGET_MAJOR,
                                                  TARGET_MINOR, 0)
  UpgradeRapiUsers()
  UpgradeWatcher()
  UpgradeFileStoragePaths(config_data)
  UpgradeNetworks(config_data)
  UpgradeCluster(config_data)
  UpgradeGroups(config_data)
  UpgradeInstances(config_data)


def DowngradeIPolicy(ipolicy, owner):
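  """Converts an instance policy back to the 2.7 format.

  Unless the policy already uses top-level "min"/"max" specs, the
  first "minmax" entry is split back into them (extra entries are
  discarded with a warning) and an empty "std" spec is added if
  missing. C{owner} is only used in messages.

  """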
  # Downgrade IPolicy to 2.7 (stable)
  minmax_keys = ["min", "max"]
  specs_is_split = any((k in ipolicy) for k in minmax_keys)
  if not specs_is_split:
    if "minmax" in ipolicy:
      if type(ipolicy["minmax"]) is not list:
        raise Error("Invalid minmax type in %s ipolicy: %s" %
                    (owner, type(ipolicy["minmax"])))
      if len(ipolicy["minmax"]) > 1:
        logging.warning("Discarding some limit specs values from %s policy",
                        owner)
      minmax = ipolicy["minmax"][0]
      del ipolicy["minmax"]
    else:
      minmax = {}
    for key in minmax_keys:
      spec = minmax.get(key, {})
      ipolicy[key] = spec
    if "std" not in ipolicy:
      ipolicy["std"] = {}


def DowngradeGroups(config_data):
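  """Downgrades the instance policy of every node group that defines one.

  """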
  for group in config_data["nodegroups"].values():
    ipolicy = group.get("ipolicy", None)
    if ipolicy is not None:
      DowngradeIPolicy(ipolicy, "group \"%s\"" % group.get("name"))


def DowngradeEnabledTemplates(cluster):
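  """Removes the 'enabled_disk_templates' field when downgrading to 2.7.

  """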
  # Remove enabled disk templates to downgrade to 2.7
  edt_key = "enabled_disk_templates"
  if edt_key in cluster:
    logging.warning("Removing cluster's enabled disk templates; value = %s",
                    utils.CommaJoin(cluster[edt_key]))
    del cluster[edt_key]


def DowngradeCluster(config_data):
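  """Downgrades cluster-level data: enabled disk templates and instance policy.

  """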
  cluster = config_data.get("cluster", None)
  if cluster is None:
    raise Error("Cannot find cluster")
  DowngradeEnabledTemplates(cluster)
  ipolicy = cluster.get("ipolicy", None)
  if ipolicy:
    DowngradeIPolicy(ipolicy, "cluster")


def DowngradeInstances(config_data):
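  """Drops the 'disks_active' flag from all instances for the downgrade.

  """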
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")

  for _, iobj in config_data["instances"].items():
    if "disks_active" in iobj:
      del iobj["disks_active"]


def DowngradeAll(config_data):
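  """Runs all downgrade steps and sets the configuration version.

  """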
  # Any code specific to a particular version should be labeled that way, so
  # it can be removed when updating to the next version.
  config_data["version"] = constants.BuildVersion(DOWNGRADE_MAJOR,
                                                  DOWNGRADE_MINOR, 0)
  DowngradeCluster(config_data)
  DowngradeGroups(config_data)
  DowngradeInstances(config_data)


def main():
  """Main program.

  """
  global options, args # pylint: disable=W0603

  # Option parsing
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  (options, args) = parser.parse_args()

  # We need to keep filenames locally because they might be renamed between
  # versions.
  options.data_dir = os.path.abspath(options.data_dir)
  options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
  options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
  options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
  options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
  options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
  options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
  options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
  options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
  options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
  options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
  options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
  options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
  options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"

  SetupLogging()

  # Option checking
  if args:
    raise Error("No arguments expected")
  if options.downgrade and not options.no_verify:
    options.no_verify = True

  # Check master name
  if not (CheckHostname(options.SSCONF_MASTER_NODE) or
          options.ignore_hostname):
    logging.error("Aborting due to hostname mismatch")
    sys.exit(constants.EXIT_FAILURE)

  if not options.force:
    if options.downgrade:
      usertext = ("The configuration is going to be DOWNGRADED to version"
                  " %s.%s. Some configuration data might be removed if it"
                  " doesn't fit in the old format. Please make sure you have"
                  " read the upgrade notes (available in the UPGRADE file and"
                  " included in other documentation formats) to understand"
                  " what will be removed. Continue with *DOWNGRADING* the"
                  " configuration?" %
                  (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
    else:
      usertext = ("Please make sure you have read the upgrade notes for"
                  " Ganeti %s (available in the UPGRADE file and included"
                  " in other documentation formats). Continue with upgrading"
                  " configuration?" % constants.RELEASE_VERSION)
    if not cli.AskUser(usertext):
      sys.exit(constants.EXIT_FAILURE)

  # Check whether it's a Ganeti configuration directory
  if not (os.path.isfile(options.CONFIG_DATA_PATH) and
          os.path.isfile(options.SERVER_PEM_PATH) and
          os.path.isfile(options.KNOWN_HOSTS_PATH)):
    raise Error(("%s does not seem to be a Ganeti configuration"
                 " directory") % options.data_dir)

  if not os.path.isdir(options.conf_dir):
    raise Error("Not a directory: %s" % options.conf_dir)

  config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))

  try:
    config_version = config_data["version"]
  except KeyError:
    raise Error("Unable to determine configuration version")

  (config_major, config_minor, config_revision) = \
    constants.SplitVersion(config_version)

  logging.info("Found configuration version %s (%d.%d.%d)",
               config_version, config_major, config_minor, config_revision)

  if "config_version" in config_data["cluster"]:
    raise Error("Inconsistent configuration: found config_version in"
                " configuration file")

  # Downgrade to the previous stable version
  if options.downgrade:
    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
            (config_major == DOWNGRADE_MAJOR and
             config_minor == DOWNGRADE_MINOR)):
      raise Error("Downgrade supported only from the latest version (%s.%s),"
                  " found %s (%s.%s.%s) instead" %
                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                   config_minor, config_revision))
    DowngradeAll(config_data)

  # Upgrade from 2.{0..7} to 2.7
  elif config_major == 2 and config_minor in range(0, 8):
    if config_revision != 0:
      logging.warning("Config revision is %s, not 0", config_revision)
    UpgradeAll(config_data)

  elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
    logging.info("No changes necessary")

  else:
    raise Error("Configuration version %d.%d.%d not supported by this tool" %
                (config_major, config_minor, config_revision))

  try:
    logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
    utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
                    data=serializer.DumpJson(config_data),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)

    if not options.dry_run:
      bootstrap.GenerateClusterCrypto(
        False, False, False, False, False,
        nodecert_file=options.SERVER_PEM_PATH,
        rapicert_file=options.RAPI_CERT_FILE,
        spicecert_file=options.SPICE_CERT_FILE,
        spicecacert_file=options.SPICE_CACERT_FILE,
        hmackey_file=options.CONFD_HMAC_KEY,
        cds_file=options.CDS_FILE)

  except Exception:
    logging.critical("Writing configuration failed. It is probably in an"
                     " inconsistent state and needs manual intervention.")
    raise

  # test loading the config file
  all_ok = True
  if not (options.dry_run or options.no_verify):
    logging.info("Testing the new config file...")
    cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
                              accept_foreign=options.ignore_hostname,
                              offline=True)
    # if we reached this, it's all fine
    vrfy = cfg.VerifyConfig()
    if vrfy:
      logging.error("Errors after conversion:")
      for item in vrfy:
        logging.error(" - %s", item)
      all_ok = False
    else:
      logging.info("File loaded successfully after upgrading")
    del cfg

  if options.downgrade:
    action = "downgraded"
    out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
  else:
    action = "upgraded"
    out_ver = constants.RELEASE_VERSION
  if all_ok:
    cli.ToStderr("Configuration successfully %s to version %s.",
                 action, out_ver)
  else:
    cli.ToStderr("Configuration %s to version %s, but there are errors."
                 "\nPlease review the file.", action, out_ver)


if __name__ == "__main__":
  main()