Revision 3ccb3a64
b/lib/bdev.py
@@ -1218,10 +1218,10 @@
     defa = pyp.Literal("_is_default").suppress()
     dbl_quote = pyp.Literal('"').suppress()

-    keyword = pyp.Word(pyp.alphanums + '-')
+    keyword = pyp.Word(pyp.alphanums + "-")

     # value types
-    value = pyp.Word(pyp.alphanums + '_-/.:')
+    value = pyp.Word(pyp.alphanums + "_-/.:")
     quoted = dbl_quote + pyp.CharsNotIn('"') + dbl_quote
     ipv4_addr = (pyp.Optional(pyp.Literal("ipv4")).suppress() +
                  pyp.Word(pyp.nums + ".") + colon + number)
b/lib/bootstrap.py
@@ -666,7 +666,7 @@
                (constants.DAEMON_UTIL, constants.DAEMON_UTIL, constants.NODED,
                 utils.ShellQuote(bind_address)))

-  result = sshrunner.Run(node, 'root', mycommand, batch=False,
+  result = sshrunner.Run(node, "root", mycommand, batch=False,
                          ask_key=ssh_key_check,
                          use_cluster_key=True,
                          strict_host_check=ssh_key_check)
@@ -708,7 +708,7 @@
                                " as master candidates. Only these nodes"
                                " can become masters. Current list of"
                                " master candidates is:\n"
-                               "%s" % ('\n'.join(mc_no_master)),
+                               "%s" % ("\n".join(mc_no_master)),
                                errors.ECODE_STATE)

   if not no_voting:
b/lib/client/gnt_backup.py
@@ -120,22 +120,22 @@


 commands = {
-  'list': (
+  "list": (
     PrintExportList, ARGS_NONE,
     [NODE_LIST_OPT],
     "", "Lists instance exports available in the ganeti cluster"),
-  'export': (
+  "export": (
     ExportInstance, ARGS_ONE_INSTANCE,
     [FORCE_OPT, SINGLE_NODE_OPT, NOSHUTDOWN_OPT, SHUTDOWN_TIMEOUT_OPT,
      REMOVE_INSTANCE_OPT, IGNORE_REMOVE_FAILURES_OPT, DRY_RUN_OPT,
      PRIORITY_OPT],
     "-n <target_node> [opts...] <name>",
     "Exports an instance to an image"),
-  'import': (
+  "import": (
     ImportInstance, ARGS_ONE_INSTANCE, COMMON_CREATE_OPTS + import_opts,
     "[...] -t disk-type -n node[:secondary-node] <name>",
     "Imports an instance from an exported image"),
-  'remove': (
+  "remove": (
     RemoveExport, [ArgUnknown(min=1, max=1)], [DRY_RUN_OPT, PRIORITY_OPT],
     "<name>", "Remove exports of named instance from the filesystem."),
   }
b/lib/confd/__init__.py
@@ -34,7 +34,7 @@
   """Prepend the confd magic fourcc to a payload.

   """
-  return ''.join([constants.CONFD_MAGIC_FOURCC, payload])
+  return "".join([constants.CONFD_MAGIC_FOURCC, payload])


 def UnpackMagic(payload):
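For context, PackMagic does nothing more than prepend the four-byte confd magic value to the outgoing UDP payload, and UnpackMagic (whose body is not shown in this hunk) is its counterpart on the receiving side. A minimal standalone sketch of that framing follows; the fourcc value and the exception used here are illustrative assumptions, not taken from this revision:

  # Sketch of the confd magic framing; the fourcc value below is an assumed
  # example, the real one is defined in Ganeti's constants module.
  CONFD_MAGIC_FOURCC = "plj0"

  def PackMagic(payload):
    # Prepend the magic fourcc, as lib/confd/__init__.py does.
    return "".join([CONFD_MAGIC_FOURCC, payload])

  def UnpackMagic(payload):
    # Counterpart: verify and strip the fourcc (error type assumed).
    if not payload.startswith(CONFD_MAGIC_FOURCC):
      raise ValueError("UDP payload contains an unknown fourcc")
    return payload[len(CONFD_MAGIC_FOURCC):]

  assert UnpackMagic(PackMagic("payload")) == "payload"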
b/lib/confd/client.py
@@ -171,7 +171,7 @@
     """
     if now is None:
       now = time.time()
-    tstamp = '%d' % now
+    tstamp = "%d" % now
     req = serializer.DumpSignedJson(request.ToDict(), self._hmac_key, tstamp)
     return confd.PackMagic(req)

b/lib/confd/querylib.py
@@ -63,7 +63,7 @@

     """
     status = constants.CONFD_REPL_STATUS_NOTIMPLEMENTED
-    answer = 'not implemented'
+    answer = "not implemented"
     return status, answer


@@ -80,10 +80,10 @@
     """
     if query is None:
       status = constants.CONFD_REPL_STATUS_OK
-      answer = 'ok'
+      answer = "ok"
     else:
       status = constants.CONFD_REPL_STATUS_ERROR
-      answer = 'non-empty ping query'
+      answer = "non-empty ping query"

     return status, answer

b/lib/confd/server.py
@@ -92,7 +92,7 @@

     """
     if self.disabled:
-      logging.debug('Confd is disabled. Ignoring query.')
+      logging.debug("Confd is disabled. Ignoring query.")
       return
     try:
       request = self.ExtractRequest(payload_in)
@@ -100,7 +100,7 @@
       payload_out = self.PackReply(reply, rsalt)
       return payload_out
     except errors.ConfdRequestError, err:
-      logging.info('Ignoring broken query from %s:%d: %s', ip, port, err)
+      logging.info("Ignoring broken query from %s:%d: %s", ip, port, err)
       return None

   def ExtractRequest(self, payload):
@@ -130,7 +130,7 @@
     try:
       request = objects.ConfdRequest.FromDict(message)
     except AttributeError, err:
-      raise errors.ConfdRequestError('%s' % err)
+      raise errors.ConfdRequestError(str(err))

     return request

b/lib/config.py
@@ -1839,8 +1839,8 @@
     # Make sure the configuration has the right version
     _ValidateConfig(data)

-    if (not hasattr(data, 'cluster') or
-        not hasattr(data.cluster, 'rsahostkeypub')):
+    if (not hasattr(data, "cluster") or
+        not hasattr(data.cluster, "rsahostkeypub")):
       raise errors.ConfigurationError("Incomplete configuration"
                                       " (missing cluster.rsahostkeypub)")

b/lib/daemon.py
@@ -382,7 +382,7 @@

     """
     if len(payload) > constants.MAX_UDP_DATA_SIZE:
-      raise errors.UdpDataSizeError('Packet too big: %s > %s' % (len(payload),
+      raise errors.UdpDataSizeError("Packet too big: %s > %s" % (len(payload),
                                     constants.MAX_UDP_DATA_SIZE))
     self._out_queue.append((ip, port, payload))

b/lib/locking.py
@@ -1465,7 +1465,7 @@
   }

 # Constant for the big ganeti lock
-BGL = 'BGL'
+BGL = "BGL"


 class GanetiLockManager:
b/lib/ovf.py
@@ -104,16 +104,16 @@

 # AllocationUnits values and conversion
 ALLOCATION_UNITS = {
-  'b': ["bytes", "b"],
-  'kb': ["kilobytes", "kb", "byte * 2^10", "kibibytes", "kib"],
-  'mb': ["megabytes", "mb", "byte * 2^20", "mebibytes", "mib"],
-  'gb': ["gigabytes", "gb", "byte * 2^30", "gibibytes", "gib"],
+  "b": ["bytes", "b"],
+  "kb": ["kilobytes", "kb", "byte * 2^10", "kibibytes", "kib"],
+  "mb": ["megabytes", "mb", "byte * 2^20", "mebibytes", "mib"],
+  "gb": ["gigabytes", "gb", "byte * 2^30", "gibibytes", "gib"],
   }
 CONVERT_UNITS_TO_MB = {
-  'b': lambda x: x / (1024 * 1024),
-  'kb': lambda x: x / 1024,
-  'mb': lambda x: x,
-  'gb': lambda x: x * 1024,
+  "b": lambda x: x / (1024 * 1024),
+  "kb": lambda x: x / 1024,
+  "mb": lambda x: x,
+  "gb": lambda x: x * 1024,
   }

 # Names of the config fields
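The two tables above carry the OVF AllocationUnits handling: ALLOCATION_UNITS maps a canonical unit key to the spellings accepted in an OVF file, while CONVERT_UNITS_TO_MB turns a value expressed in that unit into megabytes (the "b" and "kb" entries rely on Python 2 integer division, so sub-megabyte values round down). A rough sketch of how the two tables might be combined; the helper name is hypothetical and not part of lib/ovf.py:

  # Hypothetical helper showing how the two tables above work together;
  # not part of lib/ovf.py itself.
  def _ToMB(value, unit_string):
    needle = unit_string.lower()
    for key, spellings in ALLOCATION_UNITS.items():
      if needle in spellings:
        # e.g. _ToMB(2, "byte * 2^30") -> 2048
        return CONVERT_UNITS_TO_MB[key](value)
    raise ValueError("Unknown AllocationUnits value: %s" % unit_string)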
b/lib/rpc.py
@@ -657,9 +657,9 @@
       if osp is not None:
         idict["osparams"].update(osp)
     for nic in idict["nics"]:
-      nic['nicparams'] = objects.FillDict(
+      nic["nicparams"] = objects.FillDict(
         cluster.nicparams[constants.PP_DEFAULT],
-        nic['nicparams'])
+        nic["nicparams"])
     return idict

   def _InstDictHvpBep(self, (instance, hvp, bep)):
b/lib/utils/__init__.py
@@ -287,7 +287,7 @@

   The user can be passed either as a string (denoting the name) or as
   an integer (denoting the user id). If the user is not found, the
-  'default' argument is returned, which defaults to None.
+  C{default} argument is returned, which defaults to C{None}.

   """
   try:
b/lib/watcher/state.py
@@ -116,7 +116,7 @@
     fd = utils.WriteFile(filename,
                          data=serialized_form,
                          prewrite=utils.LockFile, close=False)
-    self.statefile = os.fdopen(fd, 'w+')
+    self.statefile = os.fdopen(fd, "w+")

   def Close(self):
     """Unlock configuration file and close it.
b/tools/cfgupgrade
@@ -97,7 +97,7 @@

   # Option parsing
   parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
-  parser.add_option('--dry-run', dest='dry_run',
+  parser.add_option("--dry-run", dest="dry_run",
                     action="store_true",
                     help="Try to do the conversion, but don't write"
                          " output file")
@@ -107,7 +107,7 @@
   parser.add_option("--ignore-hostname", dest="ignore_hostname",
                     action="store_true", default=False,
                     help="Don't abort if hostname doesn't match")
-  parser.add_option('--path', help="Convert configuration in this"
+  parser.add_option("--path", help="Convert configuration in this"
                     " directory instead of '%s'" % constants.DATA_DIR,
                     default=constants.DATA_DIR, dest="data_dir")
   parser.add_option("--no-verify",
b/tools/cfgupgrade12
@@ -57,26 +57,26 @@

 # Dictionary with instance old keys, and new hypervisor keys
 INST_HV_CHG = {
-  'hvm_pae': constants.HV_PAE,
-  'vnc_bind_address': constants.HV_VNC_BIND_ADDRESS,
-  'initrd_path': constants.HV_INITRD_PATH,
-  'hvm_nic_type': constants.HV_NIC_TYPE,
-  'kernel_path': constants.HV_KERNEL_PATH,
-  'hvm_acpi': constants.HV_ACPI,
-  'hvm_cdrom_image_path': constants.HV_CDROM_IMAGE_PATH,
-  'hvm_boot_order': constants.HV_BOOT_ORDER,
-  'hvm_disk_type': constants.HV_DISK_TYPE,
+  "hvm_pae": constants.HV_PAE,
+  "vnc_bind_address": constants.HV_VNC_BIND_ADDRESS,
+  "initrd_path": constants.HV_INITRD_PATH,
+  "hvm_nic_type": constants.HV_NIC_TYPE,
+  "kernel_path": constants.HV_KERNEL_PATH,
+  "hvm_acpi": constants.HV_ACPI,
+  "hvm_cdrom_image_path": constants.HV_CDROM_IMAGE_PATH,
+  "hvm_boot_order": constants.HV_BOOT_ORDER,
+  "hvm_disk_type": constants.HV_DISK_TYPE,
   }

 # Instance beparams changes
 INST_BE_CHG = {
-  'vcpus': constants.BE_VCPUS,
-  'memory': constants.BE_MEMORY,
-  'auto_balance': constants.BE_AUTO_BALANCE,
+  "vcpus": constants.BE_VCPUS,
+  "memory": constants.BE_MEMORY,
+  "auto_balance": constants.BE_AUTO_BALANCE,
   }

 # Field names
-F_SERIAL = 'serial_no'
+F_SERIAL = "serial_no"


 class Error(Exception):
@@ -97,7 +97,7 @@
   """
   logging.debug("Reading %s", file_name)
   try:
-    fh = open(file_name, 'r')
+    fh = open(file_name, "r")
   except IOError, err:
     if default is not NoDefault and err.errno == errno.ENOENT:
       return default
@@ -161,17 +161,17 @@
   """
   logging.info("Upgrading the cluster object")
   # Upgrade the configuration version
-  if 'config_version' in cluster:
-    del cluster['config_version']
+  if "config_version" in cluster:
+    del cluster["config_version"]

   # Add old ssconf keys back to config
   logging.info(" - importing ssconf keys")
-  for key in ('master_node', 'master_ip', 'master_netdev', 'cluster_name'):
+  for key in ("master_node", "master_ip", "master_netdev", "cluster_name"):
     if key not in cluster:
       cluster[key] = ReadFile(SsconfName(key)).strip()

-  if 'default_hypervisor' not in cluster:
-    old_hyp = ReadFile(SsconfName('hypervisor')).strip()
+  if "default_hypervisor" not in cluster:
+    old_hyp = ReadFile(SsconfName("hypervisor")).strip()
     if old_hyp == "xen-3.0":
       hyp = "xen-pvm"
     elif old_hyp == "xen-hvm-3.1":
@@ -182,24 +182,24 @@
       raise Error("Unknown old hypervisor name '%s'" % old_hyp)

     logging.info("Setting the default and enabled hypervisor")
-    cluster['default_hypervisor'] = hyp
-    cluster['enabled_hypervisors'] = [hyp]
+    cluster["default_hypervisor"] = hyp
+    cluster["enabled_hypervisors"] = [hyp]

   # hv/be params
-  if 'hvparams' not in cluster:
+  if "hvparams" not in cluster:
     logging.info(" - adding hvparams")
-    cluster['hvparams'] = constants.HVC_DEFAULTS
-  if 'beparams' not in cluster:
+    cluster["hvparams"] = constants.HVC_DEFAULTS
+  if "beparams" not in cluster:
     logging.info(" - adding beparams")
-    cluster['beparams'] = {constants.PP_DEFAULT: constants.BEC_DEFAULTS}
+    cluster["beparams"] = {constants.PP_DEFAULT: constants.BEC_DEFAULTS}

   # file storage
-  if 'file_storage_dir' not in cluster:
-    cluster['file_storage_dir'] = constants.DEFAULT_FILE_STORAGE_DIR
+  if "file_storage_dir" not in cluster:
+    cluster["file_storage_dir"] = constants.DEFAULT_FILE_STORAGE_DIR

   # candidate pool size
-  if 'candidate_pool_size' not in cluster:
-    cluster['candidate_pool_size'] = constants.MASTER_POOL_SIZE_DEFAULT
+  if "candidate_pool_size" not in cluster:
+    cluster["candidate_pool_size"] = constants.MASTER_POOL_SIZE_DEFAULT


 def Node12To20(node):
@@ -209,9 +209,9 @@
   logging.info("Upgrading node %s", node['name'])
   if F_SERIAL not in node:
     node[F_SERIAL] = 1
-  if 'master_candidate' not in node:
-    node['master_candidate'] = True
-  for key in 'offline', 'drained':
+  if "master_candidate" not in node:
+    node["master_candidate"] = True
+  for key in "offline", "drained":
     if key not in node:
       node[key] = False

@@ -223,12 +223,12 @@
   if F_SERIAL not in instance:
     instance[F_SERIAL] = 1

-  if 'hypervisor' not in instance:
-    instance['hypervisor'] = hypervisor
+  if "hypervisor" not in instance:
+    instance["hypervisor"] = hypervisor

   # hvparams changes
-  if 'hvparams' not in instance:
-    instance['hvparams'] = hvp = {}
+  if "hvparams" not in instance:
+    instance["hvparams"] = hvp = {}
   for old, new in INST_HV_CHG.items():
     if old in instance:
       if (instance[old] is not None and
@@ -238,8 +238,8 @@
       del instance[old]

   # beparams changes
-  if 'beparams' not in instance:
-    instance['beparams'] = bep = {}
+  if "beparams" not in instance:
+    instance["beparams"] = bep = {}
   for old, new in INST_BE_CHG.items():
     if old in instance:
       if instance[old] is not None:
@@ -247,23 +247,23 @@
       del instance[old]

   # disk changes
-  for disk in instance['disks']:
+  for disk in instance["disks"]:
     Disk12To20(drbd_minors, secrets, disk)

   # other instance changes
-  if 'status' in instance:
-    instance['admin_up'] = instance['status'] == 'up'
-    del instance['status']
+  if "status" in instance:
+    instance["admin_up"] = instance["status"] == "up"
+    del instance["status"]


 def Disk12To20(drbd_minors, secrets, disk):
   """Upgrades a disk from 1.2 to 2.0.

   """
-  if 'mode' not in disk:
-    disk['mode'] = constants.DISK_RDWR
-  if disk['dev_type'] == constants.LD_DRBD8:
-    old_lid = disk['logical_id']
+  if "mode" not in disk:
+    disk["mode"] = constants.DISK_RDWR
+  if disk["dev_type"] == constants.LD_DRBD8:
+    old_lid = disk["logical_id"]
     for node in old_lid[:2]:
       if node not in drbd_minors:
         raise Error("Can't find node '%s' while upgrading disk" % node)
@@ -271,9 +271,9 @@
       minor = drbd_minors[node]
       old_lid.append(minor)
     old_lid.append(GenerateSecret(secrets))
-  del disk['physical_id']
-  if disk['children']:
-    for child in disk['children']:
+  del disk["physical_id"]
+  if disk["children"]:
+    for child in disk["children"]:
       Disk12To20(drbd_minors, secrets, child)


@@ -288,14 +288,14 @@

   # Option parsing
   parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
-  parser.add_option('--dry-run', dest='dry_run',
+  parser.add_option("--dry-run", dest="dry_run",
                     action="store_true",
                     help="Try to do the conversion, but don't write"
                          " output file")
   parser.add_option(cli.FORCE_OPT)
   parser.add_option(cli.DEBUG_OPT)
   parser.add_option(cli.VERBOSE_OPT)
-  parser.add_option('--path', help="Convert configuration in this"
+  parser.add_option("--path", help="Convert configuration in this"
                     " directory instead of '%s'" % constants.DATA_DIR,
                     default=constants.DATA_DIR, dest="data_dir")
   (options, args) = parser.parse_args()
@@ -327,7 +327,7 @@
     raise Error(("%s does not seem to be a known Ganeti configuration"
                  " directory") % options.data_dir)

-  config_version = ReadFile(SsconfName('config_version'), "1.2").strip()
+  config_version = ReadFile(SsconfName("config_version"), "1.2").strip()
   logging.info("Found configuration version %s", config_version)

   config_data = serializer.LoadJson(ReadFile(options.CONFIG_DATA_PATH))
@@ -343,8 +343,8 @@
     if old_config_version not in (3, ):
       raise Error("Unsupported configuration version: %s" %
                   old_config_version)
-    if 'version' not in config_data:
-      config_data['version'] = constants.BuildVersion(2, 0, 0)
+    if "version" not in config_data:
+      config_data["version"] = constants.BuildVersion(2, 0, 0)
     if F_SERIAL not in config_data:
       config_data[F_SERIAL] = 1

@@ -361,8 +361,8 @@
                   " instances using remote_raid1 disk template")

     # Build content of new known_hosts file
-    cluster_name = ReadFile(SsconfName('cluster_name')).rstrip()
-    cluster_key = cluster['rsahostkeypub']
+    cluster_name = ReadFile(SsconfName("cluster_name")).rstrip()
+    cluster_key = cluster["rsahostkeypub"]
     known_hosts = "%s ssh-rsa %s\n" % (cluster_name, cluster_key)

     Cluster12To20(cluster)
@@ -370,17 +370,17 @@
     # Add node attributes
     logging.info("Upgrading nodes")
     # stable-sort the names to have repeatable runs
-    for node_name in utils.NiceSort(config_data['nodes'].keys()):
-      Node12To20(config_data['nodes'][node_name])
+    for node_name in utils.NiceSort(config_data["nodes"].keys()):
+      Node12To20(config_data["nodes"][node_name])

     # Instance changes
     logging.info("Upgrading instances")
-    drbd_minors = dict.fromkeys(config_data['nodes'], 0)
+    drbd_minors = dict.fromkeys(config_data["nodes"], 0)
     secrets = set()
     # stable-sort the names to have repeatable runs
-    for instance_name in utils.NiceSort(config_data['instances'].keys()):
-      Instance12To20(drbd_minors, secrets, cluster['default_hypervisor'],
-                     config_data['instances'][instance_name])
+    for instance_name in utils.NiceSort(config_data["instances"].keys()):
+      Instance12To20(drbd_minors, secrets, cluster["default_hypervisor"],
+                     config_data["instances"][instance_name])

   else:
     logging.info("Found a Ganeti 2.0 configuration")
b/tools/cluster-merge
@@ -197,7 +197,7 @@
       raise errors.RemoteError("Unable to retrieve list of nodes from %s."
                                " Fail reason: %s; output: %s" %
                                (cluster, result.fail_reason, result.output))
-    nodes_statuses = [line.split(',') for line in result.stdout.splitlines()]
+    nodes_statuses = [line.split(",") for line in result.stdout.splitlines()]
     nodes = [node_status[0] for node_status in nodes_statuses
             if node_status[1] == "N"]

b/tools/lvmstrap
@@ -208,7 +208,7 @@
   Currently only md devices are used as is.

   """
-  return not (disk.startswith('md') or PART_RE.match(disk))
+  return not (disk.startswith("md") or PART_RE.match(disk))


 def DeviceName(disk):
@@ -219,9 +219,9 @@

   """
   if IsPartitioned(disk):
-    device = '/dev/%s1' % disk
+    device = "/dev/%s1" % disk
   else:
-    device = '/dev/%s' % disk
+    device = "/dev/%s" % disk
   return device


@@ -268,7 +268,7 @@
     raise PrereqError("This tool runs as root only. Really.")

   osname, _, release, _, _ = os.uname()
-  if osname != 'Linux':
+  if osname != "Linux":
     raise PrereqError("This tool only runs on Linux"
                       " (detected OS: %s)." % osname)

|
Also available in: Unified diff