#!/usr/bin/python
#
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Tool to upgrade the configuration file.

This code handles only the types supported by simplejson. As an
example, 'set' is a 'list'.

"""

import os
import os.path
import sys
import optparse
import logging
import time
from cStringIO import StringIO

from ganeti import constants
from ganeti import serializer
from ganeti import utils
from ganeti import cli
from ganeti import bootstrap
from ganeti import config
from ganeti import netutils
from ganeti import pathutils


options = None
args = None


#: Target major version we will upgrade to
TARGET_MAJOR = 2
#: Target minor version we will upgrade to
TARGET_MINOR = 9
#: Target major version for downgrade
DOWNGRADE_MAJOR = 2
#: Target minor version for downgrade
DOWNGRADE_MINOR = 8

# map of legacy device types
# (mapping differing old LD_* constants to new DT_* constants)
DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
# (mapping differing new DT_* constants to old LD_* constants)
DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())


class Error(Exception):
  """Generic exception"""
  pass


def SetupLogging():
  """Configures the logging module.

  """
  formatter = logging.Formatter("%(asctime)s: %(message)s")

  stderr_handler = logging.StreamHandler()
  stderr_handler.setFormatter(formatter)
  if options.debug:
    stderr_handler.setLevel(logging.NOTSET)
  elif options.verbose:
    stderr_handler.setLevel(logging.INFO)
  else:
    stderr_handler.setLevel(logging.WARNING)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)
  root_logger.addHandler(stderr_handler)


def CheckHostname(path):
  """Ensures hostname matches ssconf value.

  @param path: Path to ssconf file

  """
  ssconf_master_node = utils.ReadOneLineFile(path)
  hostname = netutils.GetHostname().name

  if ssconf_master_node == hostname:
    return True

  logging.warning("ssconf says master node is '%s', but this machine's name"
                  " is '%s'; this tool must be run on the master node",
                  ssconf_master_node, hostname)
  return False


def _FillIPolicySpecs(default_ipolicy, ipolicy):
  """Fills in any missing minmax spec parameters from the default policy.

  """
  if "minmax" in ipolicy:
    for (key, spec) in ipolicy["minmax"][0].items():
      for (par, val) in default_ipolicy["minmax"][0][key].items():
        if par not in spec:
          spec[par] = val


def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
  """Converts an instance policy to the 2.9 format.

  Moves a legacy top-level "min"/"max" pair into the one-element "minmax"
  list and, for node groups, drops the "std" values.

  """
  minmax_keys = ["min", "max"]
  if any((k in ipolicy) for k in minmax_keys):
    minmax = {}
    for key in minmax_keys:
      if key in ipolicy:
        if ipolicy[key]:
          minmax[key] = ipolicy[key]
        del ipolicy[key]
    if minmax:
      ipolicy["minmax"] = [minmax]
  if isgroup and "std" in ipolicy:
    del ipolicy["std"]
  _FillIPolicySpecs(default_ipolicy, ipolicy)
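
# Illustrative example (parameter names and values are hypothetical):
# a pre-2.9 policy such as
#   {"min": {"disk-size": 128}, "max": {"disk-size": 1048576}, "std": {...}}
# is rewritten by UpgradeIPolicy to
#   {"minmax": [{"min": {"disk-size": 128}, "max": {"disk-size": 1048576}}],
#    "std": {...}}
# with any parameters missing from "min"/"max" copied from default_ipolicy;
# for node groups (isgroup=True) the "std" values are dropped entirely.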


def UpgradeNetworks(config_data):
  """Ensures the configuration has a (possibly empty) networks dict."""
  networks = config_data.get("networks", None)
  if not networks:
    config_data["networks"] = {}


def UpgradeCluster(config_data):
  """Upgrades the cluster's instance policy, if one is set."""
  cluster = config_data.get("cluster", None)
  if cluster is None:
    raise Error("Cannot find cluster")
  ipolicy = cluster.setdefault("ipolicy", None)
  if ipolicy:
    UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)


def UpgradeGroups(config_data):
  """Upgrades the node groups' networks and instance policies."""
  cl_ipolicy = config_data["cluster"].get("ipolicy")
  for group in config_data["nodegroups"].values():
    networks = group.get("networks", None)
    if not networks:
      group["networks"] = {}
    ipolicy = group.get("ipolicy", None)
    if ipolicy:
      if cl_ipolicy is None:
        raise Error("A group defines an instance policy but there is no"
                    " instance policy at cluster level")
      UpgradeIPolicy(ipolicy, cl_ipolicy, True)


def GetExclusiveStorageValue(config_data):
  """Return a conservative value of the exclusive_storage flag.

  Return C{True} if the cluster or at least a nodegroup have the flag set.

  """
  ret = False
  cluster = config_data["cluster"]
  ndparams = cluster.get("ndparams")
  if ndparams is not None and ndparams.get("exclusive_storage"):
    ret = True
  for group in config_data["nodegroups"].values():
    ndparams = group.get("ndparams")
    if ndparams is not None and ndparams.get("exclusive_storage"):
      ret = True
  return ret


def ChangeDiskDevType(disk, dev_type_map):
  """Replaces disk's dev_type attributes according to the given map.

  This can be used for both upgrading and downgrading the disks.

  """
  if disk["dev_type"] in dev_type_map:
    disk["dev_type"] = dev_type_map[disk["dev_type"]]
  if "children" in disk:
    for child in disk["children"]:
      ChangeDiskDevType(child, dev_type_map)


def UpgradeDiskDevType(disk):
  """Upgrades the disks' device type."""
  ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)
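
# Illustrative example: with current constants (constants.DT_PLAIN is
# "plain"), a legacy disk such as
#   {"dev_type": "lvm", "children": [...], ...}
# becomes
#   {"dev_type": "plain", "children": [...], ...}
# and the same substitution is applied recursively to all children; "drbd8"
# is effectively unchanged because the old and new constants share that value.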


def UpgradeInstances(config_data):
  """Upgrades the instances' configuration."""

  network2uuid = dict((n["name"], n["uuid"])
                      for n in config_data["networks"].values())
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")

  missing_spindles = False
  for instance, iobj in config_data["instances"].items():
    for nic in iobj["nics"]:
      name = nic.get("network", None)
      if name:
        uuid = network2uuid.get(name, None)
        if uuid:
          print("NIC with network name %s found."
                " Substituting with uuid %s." % (name, uuid))
          nic["network"] = uuid

    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    disks = iobj["disks"]
    for idx, dobj in enumerate(disks):
      expected = "disk/%s" % idx
      current = dobj.get("iv_name", "")
      if current != expected:
        logging.warning("Updating iv_name for instance %s/disk %s"
                        " from '%s' to '%s'",
                        instance, idx, current, expected)
        dobj["iv_name"] = expected

      if "dev_type" in dobj:
        UpgradeDiskDevType(dobj)

      if "spindles" not in dobj:
        missing_spindles = True

  if GetExclusiveStorageValue(config_data) and missing_spindles:
    # We cannot be sure that the instances that are missing spindles have
    # exclusive storage enabled (the check would be more complicated), so we
    # give a noncommittal message
    logging.warning("Some instance disks may need their spindles parameter"
                    " updated; you can check by running"
                    " 'gnt-cluster verify', and fix any problem with"
                    " 'gnt-cluster repair-disk-sizes'")


def UpgradeRapiUsers():
  """Moves the pre-2.4 RAPI users file to its current location."""
  if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
      not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
    if os.path.exists(options.RAPI_USERS_FILE):
      raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                  " already exists at %s" %
                  (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
    logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
                       mkdir=True, mkdir_mode=0750)

  # Create a symlink for RAPI users file
  if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
           os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
      os.path.isfile(options.RAPI_USERS_FILE)):
    logging.info("Creating symlink from %s to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)
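
# With the default data directory this results in a layout such as
#   /var/lib/ganeti/rapi/users        (the real file, directory mode 0750)
#   /var/lib/ganeti/rapi_users -> /var/lib/ganeti/rapi/users
# so tools that still read the pre-2.4 path keep working. The concrete paths
# depend on --path; see the option handling in main() below.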


def UpgradeWatcher():
  # Remove old watcher state file if it exists
  if os.path.exists(options.WATCHER_STATEFILE):
    logging.info("Removing watcher state file %s", options.WATCHER_STATEFILE)
    if not options.dry_run:
      utils.RemoveFile(options.WATCHER_STATEFILE)


def UpgradeFileStoragePaths(config_data):
  # Write file storage paths
  if not os.path.exists(options.FILE_STORAGE_PATHS_FILE):
    cluster = config_data["cluster"]
    file_storage_dir = cluster.get("file_storage_dir")
    shared_file_storage_dir = cluster.get("shared_file_storage_dir")
    del cluster

    logging.info("Ganeti 2.7 and later only allow whitelisted directories"
                 " for file storage; writing existing configuration values"
                 " into '%s'",
                 options.FILE_STORAGE_PATHS_FILE)

    if file_storage_dir:
      logging.info("File storage directory: %s", file_storage_dir)
    if shared_file_storage_dir:
      logging.info("Shared file storage directory: %s",
                   shared_file_storage_dir)

    buf = StringIO()
    buf.write("# List automatically generated from configuration by\n")
    buf.write("# cfgupgrade at %s\n" % time.asctime())
    if file_storage_dir:
      buf.write("%s\n" % file_storage_dir)
    if shared_file_storage_dir:
      buf.write("%s\n" % shared_file_storage_dir)
    utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
                    data=buf.getvalue(),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)
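
# The generated whitelist is a plain text file; a typical result (with
# hypothetical directories and timestamp) looks like:
#   # List automatically generated from configuration by
#   # cfgupgrade at Mon Jun  3 14:05:12 2013
#   /srv/ganeti/file-storage
#   /srv/ganeti/shared-file-storage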


def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
  if old_key not in nodes_by_old_key:
    logging.warning("Can't find node '%s' in configuration, assuming that it's"
                    " already up-to-date", old_key)
    return old_key
  return nodes_by_old_key[old_key][new_key_field]


def ChangeNodeIndices(config_data, old_key_field, new_key_field):
  """Re-keys the nodes dict and rewrites all node references.

  Nodes are re-indexed from C{old_key_field} to C{new_key_field}; the
  master node, instance primary nodes and DRBD logical IDs are updated
  to match.

  """
  def ChangeDiskNodeIndices(disk):
    # Note: 'drbd8' is a legacy device type from pre 2.9 and needs to be
    # considered when up/downgrading from/to any versions touching 2.9 on the
    # way.
    drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD
    if disk["dev_type"] in drbd_disk_types:
      for i in range(0, 2):
        disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
                                                disk["logical_id"][i],
                                                new_key_field)
    if "children" in disk:
      for child in disk["children"]:
        ChangeDiskNodeIndices(child)

  nodes_by_old_key = {}
  nodes_by_new_key = {}
  for (_, node) in config_data["nodes"].items():
    nodes_by_old_key[node[old_key_field]] = node
    nodes_by_new_key[node[new_key_field]] = node

  config_data["nodes"] = nodes_by_new_key

  cluster = config_data["cluster"]
  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           cluster["master_node"],
                                           new_key_field)

  for inst in config_data["instances"].values():
    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           inst["primary_node"],
                                           new_key_field)
    for disk in inst["disks"]:
      ChangeDiskNodeIndices(disk)
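
# Illustrative example: UpgradeNodeIndices (below) calls this with
# old_key_field="name" and new_key_field="uuid", so a nodes dict such as
#   {"node1.example.com": {"name": "node1.example.com", "uuid": "abc-123"}}
# is re-keyed to
#   {"abc-123": {"name": "node1.example.com", "uuid": "abc-123"}}
# while cluster["master_node"], every instance's "primary_node" and both
# node slots of DRBD "logical_id" tuples are rewritten to match.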


def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
  insts_by_old_key = {}
  insts_by_new_key = {}
  for (_, inst) in config_data["instances"].items():
    insts_by_old_key[inst[old_key_field]] = inst
    insts_by_new_key[inst[new_key_field]] = inst

  config_data["instances"] = insts_by_new_key


def UpgradeNodeIndices(config_data):
  ChangeNodeIndices(config_data, "name", "uuid")


def UpgradeInstanceIndices(config_data):
  ChangeInstanceIndices(config_data, "name", "uuid")


def UpgradeAll(config_data):
  config_data["version"] = constants.BuildVersion(TARGET_MAJOR,
                                                  TARGET_MINOR, 0)
  UpgradeRapiUsers()
  UpgradeWatcher()
  UpgradeFileStoragePaths(config_data)
  UpgradeNetworks(config_data)
  UpgradeCluster(config_data)
  UpgradeGroups(config_data)
  UpgradeInstances(config_data)
  UpgradeNodeIndices(config_data)
  UpgradeInstanceIndices(config_data)
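
# Note: in current releases constants.BuildVersion packs the version into a
# single integer (1000000 * major + 10000 * minor + revision), so the target
# version 2.9.0 is stored as 2090000 in the "version" field.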


def DowngradeDiskDevType(disk):
  """Downgrades the disks' device type."""
  ChangeDiskDevType(disk, DEV_TYPE_NEW_OLD)


def DowngradeDisks(disks, owner):
  for disk in disks:
    # Remove spindles to downgrade to 2.8
    if "spindles" in disk:
      logging.warning("Removing spindles (value=%s) from disk %s (%s) of"
                      " instance %s",
                      disk["spindles"], disk["iv_name"], disk["uuid"], owner)
      del disk["spindles"]
    if "dev_type" in disk:
      DowngradeDiskDevType(disk)


def DowngradeInstances(config_data):
  if "instances" not in config_data:
    raise Error("Cannot find the 'instances' key in the configuration!")
  for (iname, iobj) in config_data["instances"].items():
    if "disks" not in iobj:
      raise Error("Cannot find 'disks' key for instance %s" % iname)
    DowngradeDisks(iobj["disks"], iname)


def DowngradeNodeIndices(config_data):
  ChangeNodeIndices(config_data, "uuid", "name")


def DowngradeInstanceIndices(config_data):
  ChangeInstanceIndices(config_data, "uuid", "name")


def DowngradeHvparams(config_data):
  """Downgrade the cluster's hypervisor parameters."""
  cluster = config_data["cluster"]
  if "hvparams" in cluster:
    hvparams = cluster["hvparams"]
    for xen_variant in [constants.HT_XEN_PVM, constants.HT_XEN_HVM]:
      if xen_variant in hvparams:
        xen_params = hvparams[xen_variant]
        # 'xen_cmd' was introduced in 2.9
        if constants.HV_XEN_CMD in xen_params:
          del xen_params[constants.HV_XEN_CMD]
        # 'vif_script' was introduced in 2.9
        if constants.HV_VIF_SCRIPT in xen_params:
          del xen_params[constants.HV_VIF_SCRIPT]
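
# Illustrative example (key names per current constants, values hypothetical):
# a 2.9 hvparams section such as
#   {"xen-pvm": {"xen_cmd": "xm", "vif_script": "", ...}, "kvm": {...}}
# leaves this function as
#   {"xen-pvm": {...}, "kvm": {...}}
# with only the two 2.9-only keys removed; all other hypervisor parameters
# (and non-Xen hypervisors) are untouched.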


def DowngradeAll(config_data):
  # Any code specific to a particular version should be labeled that way, so
  # it can be removed when updating to the next version.
  config_data["version"] = constants.BuildVersion(DOWNGRADE_MAJOR,
                                                  DOWNGRADE_MINOR, 0)
  DowngradeInstances(config_data)
  DowngradeNodeIndices(config_data)
  DowngradeInstanceIndices(config_data)
  DowngradeHvparams(config_data)


def main():
  """Main program.

  """
  global options, args # pylint: disable=W0603

  # Option parsing
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  (options, args) = parser.parse_args()

  # We need to keep filenames locally because they might be renamed between
  # versions.
  options.data_dir = os.path.abspath(options.data_dir)
  options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
  options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
  options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
  options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
  options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
  options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
  options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
  options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
  options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
  options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
  options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
  options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
  options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"

  SetupLogging()

  # Option checking
  if args:
    raise Error("No arguments expected")
  if options.downgrade and not options.no_verify:
    options.no_verify = True

  # Check master name
  if not (CheckHostname(options.SSCONF_MASTER_NODE) or
          options.ignore_hostname):
    logging.error("Aborting due to hostname mismatch")
    sys.exit(constants.EXIT_FAILURE)

  if not options.force:
    if options.downgrade:
      usertext = ("The configuration is going to be DOWNGRADED to version"
                  " %s.%s. Some configuration data might be removed if it"
                  " doesn't fit in the old format. Please make sure you have"
                  " read the upgrade notes (available in the UPGRADE file and"
                  " included in other documentation formats) to understand"
                  " what will be removed. Continue with *DOWNGRADING* the"
                  " configuration?" %
                  (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
    else:
      usertext = ("Please make sure you have read the upgrade notes for"
                  " Ganeti %s (available in the UPGRADE file and included"
                  " in other documentation formats). Continue with upgrading"
                  " configuration?" % constants.RELEASE_VERSION)
    if not cli.AskUser(usertext):
      sys.exit(constants.EXIT_FAILURE)

  # Check whether it's a Ganeti configuration directory
  if not (os.path.isfile(options.CONFIG_DATA_PATH) and
          os.path.isfile(options.SERVER_PEM_PATH) and
          os.path.isfile(options.KNOWN_HOSTS_PATH)):
    raise Error(("%s does not seem to be a Ganeti configuration"
                 " directory") % options.data_dir)

  if not os.path.isdir(options.conf_dir):
    raise Error("Not a directory: %s" % options.conf_dir)

  config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))

  try:
    config_version = config_data["version"]
  except KeyError:
    raise Error("Unable to determine configuration version")

  (config_major, config_minor, config_revision) = \
    constants.SplitVersion(config_version)

  logging.info("Found configuration version %s (%d.%d.%d)",
               config_version, config_major, config_minor, config_revision)

  if "config_version" in config_data["cluster"]:
    raise Error("Inconsistent configuration: found config_version in"
                " configuration file")

  # Downgrade to the previous stable version
  if options.downgrade:
    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
            (config_major == DOWNGRADE_MAJOR and
             config_minor == DOWNGRADE_MINOR)):
      raise Error("Downgrade supported only from the latest version (%s.%s),"
                  " found %s (%s.%s.%s) instead" %
                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                   config_minor, config_revision))
    DowngradeAll(config_data)

  # Upgrade from 2.{0..9} to 2.9
  elif config_major == 2 and config_minor in range(0, 10):
    if config_revision != 0:
      logging.warning("Config revision is %s, not 0", config_revision)
    UpgradeAll(config_data)

  elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
    logging.info("No changes necessary")

  else:
    raise Error("Configuration version %d.%d.%d not supported by this tool" %
                (config_major, config_minor, config_revision))

  try:
    logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
    utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
                    data=serializer.DumpJson(config_data),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)

    if not options.dry_run:
      bootstrap.GenerateClusterCrypto(
        False, False, False, False, False,
        nodecert_file=options.SERVER_PEM_PATH,
        rapicert_file=options.RAPI_CERT_FILE,
        spicecert_file=options.SPICE_CERT_FILE,
        spicecacert_file=options.SPICE_CACERT_FILE,
        hmackey_file=options.CONFD_HMAC_KEY,
        cds_file=options.CDS_FILE)

  except Exception:
    logging.critical("Writing configuration failed. It is probably in an"
                     " inconsistent state and needs manual intervention.")
    raise

  # test loading the config file
  all_ok = True
  if not (options.dry_run or options.no_verify):
    logging.info("Testing the new config file...")
    cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
                              accept_foreign=options.ignore_hostname,
                              offline=True)
    # if we reached this, it's all fine
    vrfy = cfg.VerifyConfig()
    if vrfy:
      logging.error("Errors after conversion:")
      for item in vrfy:
        logging.error(" - %s", item)
      all_ok = False
    else:
      logging.info("File loaded successfully after upgrading")
    del cfg

  if options.downgrade:
    action = "downgraded"
    out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
  else:
    action = "upgraded"
    out_ver = constants.RELEASE_VERSION
  if all_ok:
    cli.ToStderr("Configuration successfully %s to version %s.",
                 action, out_ver)
  else:
    cli.ToStderr("Configuration %s to version %s, but there are errors."
                 "\nPlease review the file.", action, out_ver)


if __name__ == "__main__":
  main()
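
# Typical invocations (illustrative; the tool is shipped in Ganeti's tools
# directory and operates on pathutils.DATA_DIR unless --path is given):
#
#   cfgupgrade --verbose --dry-run   # preview the conversion, write nothing
#   cfgupgrade --force               # upgrade without the confirmation prompt
#   cfgupgrade --downgrade           # revert to the 2.8 configuration format
#
# Since utils.WriteFile is called with backup=True, the previous config.data
# is preserved as a backup file alongside the rewritten one.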