root / tools / cfgupgrade @ cccbacf2
History | View | Annotate | Download (22.9 kB)
1 |
#!/usr/bin/python |
---|---|
2 |
# |
3 |
|
4 |
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc. |
5 |
# |
6 |
# This program is free software; you can redistribute it and/or modify |
7 |
# it under the terms of the GNU General Public License as published by |
8 |
# the Free Software Foundation; either version 2 of the License, or |
9 |
# (at your option) any later version. |
10 |
# |
11 |
# This program is distributed in the hope that it will be useful, but |
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of |
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 |
# General Public License for more details. |
15 |
# |
16 |
# You should have received a copy of the GNU General Public License |
17 |
# along with this program; if not, write to the Free Software |
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |
19 |
# 02110-1301, USA. |
20 |
|
21 |
|
22 |
"""Tool to upgrade the configuration file. |
23 |
|
24 |
This code handles only the types supported by simplejson. As an |
25 |
example, 'set' is a 'list'. |
26 |
|
27 |
""" |
28 |
|
29 |
|
30 |
import os |
31 |
import os.path |
32 |
import sys |
33 |
import optparse |
34 |
import logging |
35 |
import time |
36 |
from cStringIO import StringIO |
37 |
from bitarray import bitarray |
38 |
from base64 import b64encode, b64decode |
39 |
|
40 |
from ganeti import constants |
41 |
from ganeti import serializer |
42 |
from ganeti import utils |
43 |
from ganeti import cli |
44 |
from ganeti import bootstrap |
45 |
from ganeti import config |
46 |
from ganeti import netutils |
47 |
from ganeti import pathutils |
48 |
|
49 |
from ganeti.utils import version |
50 |
|
51 |
|
52 |
#: Parsed command-line options; populated by main()
options = None
#: Positional command-line arguments; populated by main()
args = None


#: Target major version we will upgrade to
TARGET_MAJOR = 2
#: Target minor version we will upgrade to
TARGET_MINOR = 10
#: Target major version for downgrade
DOWNGRADE_MAJOR = 2
#: Target minor version for downgrade
DOWNGRADE_MINOR = 9

# map of legacy device types
# (mapping differing old LD_* constants to new DT_* constants)
DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
# (mapping differing new DT_* constants to old LD_* constants)
DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())
70 |
|
71 |
|
72 |
class Error(Exception):
  """Generic exception raised for any cfgupgrade failure."""
75 |
|
76 |
|
77 |
def SetupLogging():
  """Configure the root logger to emit to stderr.

  The stderr verbosity is chosen from the global command-line options:
  everything when debugging, informational messages when verbose,
  warnings only otherwise.

  """
  handler = logging.StreamHandler()
  handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))

  if options.debug:
    level = logging.NOTSET
  elif options.verbose:
    level = logging.INFO
  else:
    level = logging.WARNING
  handler.setLevel(level)

  root = logging.getLogger("")
  root.setLevel(logging.NOTSET)
  root.addHandler(handler)
95 |
|
96 |
|
97 |
def CheckHostname(path):
  """Ensures hostname matches ssconf value.

  @param path: Path to ssconf file
  @return: C{True} when the local hostname equals the recorded master
      node name, C{False} (with a warning logged) otherwise

  """
  ssconf_master_node = utils.ReadOneLineFile(path)
  hostname = netutils.GetHostname().name

  if ssconf_master_node != hostname:
    logging.warning("Warning: ssconf says master node is '%s', but this"
                    " machine's name is '%s'; this tool must be run on"
                    " the master node", ssconf_master_node, hostname)
    return False
  return True
113 |
|
114 |
|
115 |
def _FillIPolicySpecs(default_ipolicy, ipolicy): |
116 |
if "minmax" in ipolicy: |
117 |
for (key, spec) in ipolicy["minmax"][0].items(): |
118 |
for (par, val) in default_ipolicy["minmax"][0][key].items(): |
119 |
if par not in spec: |
120 |
spec[par] = val |
121 |
|
122 |
|
123 |
def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
  """Convert a flat instance policy to the new "minmax" layout.

  Top-level "min"/"max" entries are moved into a single-element
  "minmax" list, group-level policies drop their "std" entry, and any
  missing parameters are then filled in from the defaults.

  """
  if "min" in ipolicy or "max" in ipolicy:
    minmax = {}
    for bound in ("min", "max"):
      if bound in ipolicy:
        values = ipolicy.pop(bound)
        if values:
          minmax[bound] = values
    if minmax:
      ipolicy["minmax"] = [minmax]
  if isgroup and "std" in ipolicy:
    del ipolicy["std"]
  _FillIPolicySpecs(default_ipolicy, ipolicy)
137 |
|
138 |
|
139 |
# pylint: disable=E1101
def UpgradeNetworks(config_data):
  """Convert network reservation bitmaps between encodings.

  Depending on the --tob64/--to01 command-line options, each network's
  "reservations" and "ext_reservations" bitmaps are converted from a
  "01" string to base64, or from base64 back to a "01" string.  A
  ValueError from the bitarray constructor is taken to mean the value
  is already in the other encoding.

  """
  networks = config_data.get("networks", {})
  if not networks:
    config_data["networks"] = {}
  for nobj in networks.values():
    for key in ("reservations", "ext_reservations"):
      r = nobj[key]
      if options.tob64:
        try:
          # bitarray(str) only accepts a string of 0s and 1s; anything
          # else raises ValueError, meaning the value is (probably)
          # already base64-encoded
          b = bitarray(r)
          nobj[key] = b64encode(b.tobytes())
        except ValueError:
          print("No 01 network found! Probably already in base64.")
      if options.to01:
        try:
          b = bitarray(r)
          print("01 network found! Do nothing.")
        except ValueError:
          # not a 01 string: decode from base64 into a bitmap string
          b = bitarray()
          b.frombytes(b64decode(r))
          nobj[key] = b.to01()
      print("%s: %s -> %s" % (nobj["name"], r, nobj[key]))
162 |
|
163 |
|
164 |
def UpgradeCluster(config_data):
  """Upgrade the cluster-level configuration section.

  Raises L{Error} when no cluster section is present; a cluster-level
  instance policy, if any, is converted to the new layout.

  """
  cluster = config_data.get("cluster")
  if cluster is None:
    raise Error("Cannot find cluster")
  if cluster.setdefault("ipolicy", None):
    UpgradeIPolicy(cluster["ipolicy"], constants.IPOLICY_DEFAULTS, False)
171 |
|
172 |
|
173 |
def UpgradeGroups(config_data):
  """Upgrade all node-group sections.

  Ensures every group has a "networks" mapping and converts any
  group-level instance policy, using the cluster-level policy for the
  defaults.

  """
  cl_ipolicy = config_data["cluster"].get("ipolicy")
  for group in config_data["nodegroups"].values():
    if not group.get("networks", None):
      group["networks"] = {}
    ipolicy = group.get("ipolicy", None)
    if not ipolicy:
      continue
    if cl_ipolicy is None:
      raise Error("A group defines an instance policy but there is no"
                  " instance policy at cluster level")
    UpgradeIPolicy(ipolicy, cl_ipolicy, True)
185 |
|
186 |
|
187 |
def GetExclusiveStorageValue(config_data):
  """Return a conservative value of the exclusive_storage flag.

  Return C{True} if the cluster or at least a nodegroup have the flag set.

  """
  def _Enabled(obj):
    # a missing ndparams dict counts as "flag not set"
    params = obj.get("ndparams")
    return params is not None and bool(params.get("exclusive_storage"))

  if _Enabled(config_data["cluster"]):
    return True
  return any(_Enabled(group)
             for group in config_data["nodegroups"].values())
203 |
|
204 |
|
205 |
def RemovePhysicalId(disk):
  """Recursively drop the obsolete "physical_id" attribute from a disk."""
  for child in disk.get("children", []):
    RemovePhysicalId(child)
  disk.pop("physical_id", None)
211 |
|
212 |
|
213 |
def ChangeDiskDevType(disk, dev_type_map):
  """Replaces disk's dev_type attributes according to the given map.

  This can be used for both, up or downgrading the disks.
  """
  current = disk["dev_type"]
  if current in dev_type_map:
    disk["dev_type"] = dev_type_map[current]
  for child in disk.get("children", []):
    ChangeDiskDevType(child, dev_type_map)
223 |
|
224 |
|
225 |
def UpgradeDiskDevType(disk):
  """Upgrades the disks' device type.

  Maps legacy LD_* device type names to their DT_* equivalents,
  recursing into child disks.

  """
  ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)
228 |
|
229 |
|
230 |
def UpgradeInstances(config_data):
  """Upgrades the instances' configuration.

  NIC network references are converted from names to UUIDs, obsolete
  disk attributes are removed, iv_name and dev_type are normalized,
  and a warning is emitted when disks might need a spindles parameter.

  @raise Error: when the configuration lacks an "instances" entry or an
      instance lacks a "disks" entry

  """
  # name -> uuid map for translating NIC network references
  network2uuid = dict((n["name"], n["uuid"])
                      for n in config_data["networks"].values())
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")

  missing_spindles = False
  for instance, iobj in config_data["instances"].items():
    for nic in iobj["nics"]:
      name = nic.get("network", None)
      if name:
        uuid = network2uuid.get(name, None)
        if uuid:
          print("NIC with network name %s found."
                " Substituting with uuid %s." % (name, uuid))
          nic["network"] = uuid

    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    disks = iobj["disks"]
    for idx, dobj in enumerate(disks):
      RemovePhysicalId(dobj)

      # iv_name must match the disk's index within the instance
      expected = "disk/%s" % idx
      current = dobj.get("iv_name", "")
      if current != expected:
        logging.warning("Updating iv_name for instance %s/disk %s"
                        " from '%s' to '%s'",
                        instance, idx, current, expected)
        dobj["iv_name"] = expected

      if "dev_type" in dobj:
        UpgradeDiskDevType(dobj)

      # FIX: use the idiomatic "not in" (was "if not 'spindles' in dobj")
      if "spindles" not in dobj:
        missing_spindles = True

  if GetExclusiveStorageValue(config_data) and missing_spindles:
    # We cannot be sure that the instances that are missing spindles have
    # exclusive storage enabled (the check would be more complicated), so we
    # give a noncommittal message
    logging.warning("Some instance disks could be needing to update the"
                    " spindles parameter; you can check by running"
                    " 'gnt-cluster verify', and fix any problem with"
                    " 'gnt-cluster repair-disk-sizes'")
277 |
|
278 |
|
279 |
def UpgradeRapiUsers():
  """Move the pre-2.4 RAPI users file to its new location.

  If a real (non-symlink) file exists at the old path, it is renamed to
  the new path; afterwards a compatibility symlink pointing from the
  old path to the new file is created.  All filesystem changes are
  skipped in --dry-run mode.

  @raise Error: if both the old and the new users file exist

  """
  if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
      not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
    if os.path.exists(options.RAPI_USERS_FILE):
      raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                  " already exists at %s" %
                  (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
    logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
                       mkdir=True, mkdir_mode=0750)

  # Create a symlink for RAPI users file
  if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
           os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
      os.path.isfile(options.RAPI_USERS_FILE)):
    logging.info("Creating symlink from %s to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)
300 |
|
301 |
|
302 |
def UpgradeWatcher():
  """Remove the obsolete watcher state file, if present.

  The removal is skipped in --dry-run mode.

  """
  if not os.path.exists(options.WATCHER_STATEFILE):
    return
  logging.info("Removing watcher state file %s", options.WATCHER_STATEFILE)
  if not options.dry_run:
    utils.RemoveFile(options.WATCHER_STATEFILE)
308 |
|
309 |
|
310 |
def UpgradeFileStoragePaths(config_data):
  """Write the whitelisted file-storage directories file.

  Ganeti 2.7 and later only allow whitelisted directories for file
  storage; this writes the (shared) file storage directories found in
  the cluster configuration into that whitelist file, unless the file
  already exists.  Writing is skipped in --dry-run mode.

  """
  # Write file storage paths
  if not os.path.exists(options.FILE_STORAGE_PATHS_FILE):
    cluster = config_data["cluster"]
    file_storage_dir = cluster.get("file_storage_dir")
    shared_file_storage_dir = cluster.get("shared_file_storage_dir")
    del cluster

    logging.info("Ganeti 2.7 and later only allow whitelisted directories"
                 " for file storage; writing existing configuration values"
                 " into '%s'",
                 options.FILE_STORAGE_PATHS_FILE)

    if file_storage_dir:
      logging.info("File storage directory: %s", file_storage_dir)
    if shared_file_storage_dir:
      logging.info("Shared file storage directory: %s",
                   shared_file_storage_dir)

    buf = StringIO()
    buf.write("# List automatically generated from configuration by\n")
    buf.write("# cfgupgrade at %s\n" % time.asctime())
    if file_storage_dir:
      buf.write("%s\n" % file_storage_dir)
    if shared_file_storage_dir:
      buf.write("%s\n" % shared_file_storage_dir)
    utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
                    data=buf.getvalue(),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)
341 |
|
342 |
|
343 |
def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
  """Translate a node key from the old scheme to the new one.

  An unknown key is assumed to be already converted and is returned
  unchanged (with a warning logged).

  """
  node = nodes_by_old_key.get(old_key)
  if node is None:
    logging.warning("Can't find node '%s' in configuration, assuming that it's"
                    " already up-to-date", old_key)
    return old_key
  return node[new_key_field]
349 |
|
350 |
|
351 |
def ChangeNodeIndices(config_data, old_key_field, new_key_field):
  """Re-key the "nodes" mapping and update all node references.

  Every place that refers to a node by the old key field — the cluster
  master node, instance primary nodes and DRBD disk logical_ids — is
  translated to the new key field.

  """
  def ChangeDiskNodeIndices(disk):
    # Note: 'drbd8' is a legacy device type from pre 2.9 and needs to be
    # considered when up/downgrading from/to any versions touching 2.9 on the
    # way.
    drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD
    if disk["dev_type"] in drbd_disk_types:
      # the first two logical_id entries of a DRBD disk are node keys
      for i in range(0, 2):
        disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
                                                disk["logical_id"][i],
                                                new_key_field)
    if "children" in disk:
      for child in disk["children"]:
        ChangeDiskNodeIndices(child)

  nodes_by_old_key = {}
  nodes_by_new_key = {}
  for (_, node) in config_data["nodes"].items():
    nodes_by_old_key[node[old_key_field]] = node
    nodes_by_new_key[node[new_key_field]] = node

  config_data["nodes"] = nodes_by_new_key

  cluster = config_data["cluster"]
  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           cluster["master_node"],
                                           new_key_field)

  for inst in config_data["instances"].values():
    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           inst["primary_node"],
                                           new_key_field)
    for disk in inst["disks"]:
      ChangeDiskNodeIndices(disk)
385 |
|
386 |
|
387 |
def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
  """Re-key the "instances" mapping by a different instance field.

  @param old_key_field: previous key field; not needed for the
      re-keying itself, kept in the signature for symmetry with
      L{ChangeNodeIndices}
  @param new_key_field: instance attribute to use as the new mapping key

  """
  # FIX: the original also built an insts_by_old_key dict that was never
  # used; only the new-key mapping is required.
  # pylint: disable=W0613
  config_data["instances"] = dict(
    (inst[new_key_field], inst)
    for inst in config_data["instances"].values())
396 |
|
397 |
def UpgradeNodeIndices(config_data):
  """Switch the node mapping from name-based to UUID-based keys."""
  ChangeNodeIndices(config_data, "name", "uuid")
399 |
|
400 |
|
401 |
def UpgradeInstanceIndices(config_data):
  """Switch the instance mapping from name-based to UUID-based keys."""
  ChangeInstanceIndices(config_data, "name", "uuid")
403 |
|
404 |
|
405 |
def UpgradeAll(config_data):
  """Run all upgrade steps and bump the version to the target.

  The order matters: UpgradeNetworks guarantees a "networks" mapping
  exists before UpgradeInstances reads it, and node indices are
  converted before instance indices.

  """
  config_data["version"] = version.BuildVersion(TARGET_MAJOR, TARGET_MINOR, 0)
  UpgradeRapiUsers()
  UpgradeWatcher()
  UpgradeFileStoragePaths(config_data)
  UpgradeNetworks(config_data)
  UpgradeCluster(config_data)
  UpgradeGroups(config_data)
  UpgradeInstances(config_data)
  UpgradeNodeIndices(config_data)
  UpgradeInstanceIndices(config_data)
416 |
|
417 |
|
418 |
def DowngradeNDParams(ndparams):
  """Strip node parameters unknown to the downgrade target version."""
  for param in ("ovs", "ovs_link", "ovs_name"):
    ndparams.pop(param, None)
422 |
|
423 |
|
424 |
def DowngradeNicParams(nicparams):
  """Drop the "vlan" NIC parameter, unknown to the older version."""
  nicparams.pop("vlan", None)
427 |
|
428 |
|
429 |
def DowngradeHVParams(hvparams):
  """Remove Xen hypervisor parameters the older version doesn't know."""
  for hv in ("xen-pvm", "xen-hvm"):
    if hv not in hvparams:
      continue
    params = hvparams[hv]
    for param in ("cpuid", "soundhw"):
      params.pop(param, None)
436 |
|
437 |
|
438 |
def DowngradeCluster(config_data):
  """Apply all cluster-level downgrades (nd/nic/hv parameters)."""
  cluster = config_data["cluster"]
  if "ndparams" in cluster:
    DowngradeNDParams(cluster["ndparams"])
  if "nicparams" in cluster:
    # only the default NIC parameter set carries the removed keys
    DowngradeNicParams(cluster["nicparams"][constants.PP_DEFAULT])
  if "hvparams" in cluster:
    DowngradeHVParams(cluster["hvparams"])
446 |
|
447 |
|
448 |
def DowngradeNodeGroups(config_data):
  """Apply node-parameter downgrades to every node group.

  FIX: iterate .values() directly; the original iterated .items() and
  discarded the key.

  """
  for ngobj in config_data["nodegroups"].values():
    if "ndparams" in ngobj:
      DowngradeNDParams(ngobj["ndparams"])
452 |
|
453 |
|
454 |
def DowngradeNodes(config_data):
  """Apply node-parameter downgrades to every node.

  FIX: iterate .values() directly; the original iterated .items() and
  discarded the key.

  """
  for nobj in config_data["nodes"].values():
    if "ndparams" in nobj:
      DowngradeNDParams(nobj["ndparams"])
458 |
|
459 |
|
460 |
def DowngradeInstances(config_data):
  """Apply hypervisor and NIC parameter downgrades to all instances.

  FIX: iterate .values() directly; the original iterated .items() and
  discarded the key.

  """
  for iobj in config_data["instances"].values():
    DowngradeHVParams(iobj["hvparams"])
    for nic in iobj["nics"]:
      DowngradeNicParams(nic["nicparams"])
465 |
|
466 |
|
467 |
def DowngradeAll(config_data):
  """Downgrade the whole configuration to the previous stable version."""
  # Any code specific to a particular version should be labeled that way, so
  # it can be removed when updating to the next version.
  config_data["version"] = version.BuildVersion(DOWNGRADE_MAJOR,
                                                DOWNGRADE_MINOR, 0)
  DowngradeCluster(config_data)
  DowngradeNodeGroups(config_data)
  DowngradeNodes(config_data)
  DowngradeInstances(config_data)
476 |
|
477 |
|
478 |
def main():
  """Main program.

  Parses the command line, loads the configuration, performs the
  requested upgrade or downgrade, writes the result back and finally
  verifies the new file (unless disabled).

  """
  global options, args  # pylint: disable=W0603

  # Option parsing
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  parser.add_option("--tob64",
                    help="Change to base64 encoded networks",
                    action="store_true", dest="tob64", default=False)
  parser.add_option("--to01",
                    help="Change to non encoded networks (01 bitarrays)",
                    action="store_true", dest="to01", default=False)
  (options, args) = parser.parse_args()

  # We need to keep filenames locally because they might be renamed between
  # versions.
  options.data_dir = os.path.abspath(options.data_dir)
  options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
  options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
  options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
  options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
  options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
  options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
  options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
  options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
  options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
  options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
  options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
  options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
  options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"

  SetupLogging()

  # Option checking
  if args:
    raise Error("No arguments expected")
  if options.downgrade and not options.no_verify:
    # NOTE(review): verification is forced off on downgrade — presumably
    # because the current-version verifier would reject the older format;
    # confirm before relying on this
    options.no_verify = True

  # Check master name
  if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
    logging.error("Aborting due to hostname mismatch")
    sys.exit(constants.EXIT_FAILURE)

  if not options.force:
    # interactive confirmation unless --force was given
    if options.downgrade:
      usertext = ("The configuration is going to be DOWNGRADED to version %s.%s"
                  " Some configuration data might be removed if they don't fit"
                  " in the old format. Please make sure you have read the"
                  " upgrade notes (available in the UPGRADE file and included"
                  " in other documentation formats) to understand what they"
                  " are. Continue with *DOWNGRADING* the configuration?" %
                  (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
    else:
      usertext = ("Please make sure you have read the upgrade notes for"
                  " Ganeti %s (available in the UPGRADE file and included"
                  " in other documentation formats). Continue with upgrading"
                  " configuration?" % constants.RELEASE_VERSION)
    if not cli.AskUser(usertext):
      sys.exit(constants.EXIT_FAILURE)

  # Check whether it's a Ganeti configuration directory
  if not (os.path.isfile(options.CONFIG_DATA_PATH) and
          os.path.isfile(options.SERVER_PEM_PATH) and
          os.path.isfile(options.KNOWN_HOSTS_PATH)):
    raise Error(("%s does not seem to be a Ganeti configuration"
                 " directory") % options.data_dir)

  if not os.path.isdir(options.conf_dir):
    raise Error("Not a directory: %s" % options.conf_dir)

  config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))

  try:
    config_version = config_data["version"]
  except KeyError:
    raise Error("Unable to determine configuration version")

  (config_major, config_minor, config_revision) = \
    version.SplitVersion(config_version)

  logging.info("Found configuration version %s (%d.%d.%d)",
               config_version, config_major, config_minor, config_revision)

  if "config_version" in config_data["cluster"]:
    raise Error("Inconsistent configuration: found config_version in"
                " configuration file")

  # Downgrade to the previous stable version
  if options.downgrade:
    # downgrading is only supported from the target version itself or
    # from a configuration already at the downgrade version
    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
            (config_major == DOWNGRADE_MAJOR and
             config_minor == DOWNGRADE_MINOR)):
      raise Error("Downgrade supported only from the latest version (%s.%s),"
                  " found %s (%s.%s.%s) instead" %
                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                   config_minor, config_revision))
    DowngradeAll(config_data)

  # Upgrade from 2.{0..9} to 2.10
  elif config_major == 2 and config_minor in range(0, 10):
    if config_revision != 0:
      logging.warning("Config revision is %s, not 0", config_revision)
    UpgradeAll(config_data)

  elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
    logging.info("No changes necessary")

  else:
    raise Error("Configuration version %d.%d.%d not supported by this tool" %
                (config_major, config_minor, config_revision))

  try:
    logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
    utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
                    data=serializer.DumpJson(config_data),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)

    if not options.dry_run:
      # NOTE(review): the five leading False flags presumably suppress
      # force-regeneration of the individual certificates — confirm against
      # bootstrap.GenerateClusterCrypto's signature
      bootstrap.GenerateClusterCrypto(
        False, False, False, False, False,
        nodecert_file=options.SERVER_PEM_PATH,
        rapicert_file=options.RAPI_CERT_FILE,
        spicecert_file=options.SPICE_CERT_FILE,
        spicecacert_file=options.SPICE_CACERT_FILE,
        hmackey_file=options.CONFD_HMAC_KEY,
        cds_file=options.CDS_FILE)

  except Exception:
    logging.critical("Writing configuration failed. It is probably in an"
                     " inconsistent state and needs manual intervention.")
    raise

  # test loading the config file
  all_ok = True
  if not (options.dry_run or options.no_verify):
    logging.info("Testing the new config file...")
    cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
                              accept_foreign=options.ignore_hostname,
                              offline=True)
    # if we reached this, it's all fine
    vrfy = cfg.VerifyConfig()
    if vrfy:
      logging.error("Errors after conversion:")
      for item in vrfy:
        logging.error(" - %s", item)
      all_ok = False
    else:
      logging.info("File loaded successfully after upgrading")
    del cfg

  if options.downgrade:
    action = "downgraded"
    out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
  else:
    action = "upgraded"
    out_ver = constants.RELEASE_VERSION
  if all_ok:
    cli.ToStderr("Configuration successfully %s to version %s.",
                 action, out_ver)
  else:
    cli.ToStderr("Configuration %s to version %s, but there are errors."
                 "\nPlease review the file.", action, out_ver)
668 |
|
669 |
|
670 |
# Script entry point
if __name__ == "__main__":
  main()