root / qa / qa_cluster.py @ b3f3aa3d
History | View | Annotate | Download (27.7 kB)
1 |
#
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2007, 2010, 2011, 2012, 2013 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
|
22 |
"""Cluster related QA tests.
|
23 |
|
24 |
"""
|
25 |
|
26 |
import re |
27 |
import tempfile |
28 |
import os.path |
29 |
|
30 |
from ganeti import constants |
31 |
from ganeti import compat |
32 |
from ganeti import utils |
33 |
from ganeti import pathutils |
34 |
|
35 |
import qa_config |
36 |
import qa_utils |
37 |
import qa_error |
38 |
|
39 |
from qa_utils import AssertEqual, AssertCommand, GetCommandOutput |
40 |
|
41 |
|
42 |
# Prefix for LVM volumes created by QA code during tests; used by the
# reserved-lvs and exclusive-storage tests below to name (and later match)
# the volumes they create
_QA_LV_PREFIX = "qa-"

#: cluster verify command, in list form suitable for AssertCommand
_CLUSTER_VERIFY = ["gnt-cluster", "verify"]
47 |
|
48 |
|
49 |
def _RemoveFileFromAllNodes(filename):
  """Delete C{filename} on every configured QA node.

  Uses C{rm -f}, so it does not fail on nodes where the file is absent.

  """
  remove_cmd = ["rm", "-f", filename]
  for node in qa_config.get("nodes"):
    AssertCommand(remove_cmd, node=node)
55 |
|
56 |
|
57 |
def _CheckFileOnAllNodes(filename, content):
  """Verifies the content of the given file on all nodes.

  """
  cat_cmd = utils.ShellQuoteArgs(["cat", filename])
  for node in qa_config.get("nodes"):
    actual = qa_utils.GetCommandOutput(node["primary"], cat_cmd)
    AssertEqual(actual, content)
|
64 |
|
65 |
|
66 |
# "gnt-cluster info" fields
|
67 |
# Matches one output line; group "field" is the key before the colon,
# group "value" is the (non-empty) value after it. Leading dashes and
# whitespace (list markers / indentation) are skipped.
_CIFIELD_RE = re.compile(r"^[-\s]*(?P<field>[^\s:]+):\s*(?P<value>\S.*)$")
|
68 |
|
69 |
|
70 |
def _GetBoolClusterField(field):
  """Get the Boolean value of a cluster field.

  This function currently assumes that the field name is unique in the cluster
  configuration. An assertion checks this assumption.

  @type field: string
  @param field: Name of the field
  @rtype: bool
  @return: The effective value of the field

  """
  master = qa_config.GetMasterNode()
  output = qa_utils.GetCommandOutput(master["primary"], "gnt-cluster info")
  value = None
  for line in output.splitlines():
    match = _CIFIELD_RE.match(line)
    # FIXME: There should be a way to specify a field through a hierarchy
    if match and match.group("field") == field:
      # Make sure that ignoring the hierarchy doesn't cause a double match
      assert value is None
      value = (match.group("value").lower() == "true")
  if value is not None:
    return value
  raise qa_error.Error("Field not found in cluster configuration: %s" % field)
96 |
|
97 |
|
98 |
# Cluster-verify errors (date, "ERROR", then error code)
|
99 |
_CVERROR_RE = re.compile(r"^[\w\s:]+\s+- ERROR:([A-Z0-9_-]+):")
|
100 |
|
101 |
|
102 |
def _GetCVErrorCodes(cvout): |
103 |
ret = set()
|
104 |
for l in cvout.splitlines(): |
105 |
m = _CVERROR_RE.match(l) |
106 |
if m:
|
107 |
ecode = m.group(1)
|
108 |
ret.add(ecode) |
109 |
return ret
|
110 |
|
111 |
|
112 |
def AssertClusterVerify(fail=False, errors=None):
  """Run cluster-verify and check the result

  @type fail: bool
  @param fail: if cluster-verify is expected to fail instead of succeeding
  @type errors: list of tuples
  @param errors: List of CV_XXX errors that are expected; if specified, all the
      errors listed must appear in cluster-verify output. A non-empty value
      implies C{fail=True}.

  """
  master = qa_config.GetMasterNode()
  verify_cmd = "gnt-cluster verify"
  if errors:
    # With expected errors the command must fail; ask for machine-readable
    # error codes and make sure all expected ones are present
    output = GetCommandOutput(master["primary"],
                              verify_cmd + " --error-codes", fail=True)
    seen = _GetCVErrorCodes(output)
    # CV_XXX constants are (category, code, description) tuples
    wanted = compat.UniqueFrozenset(code for (_, code, _) in errors)
    if not seen.issuperset(wanted):
      missing = wanted.difference(seen)
      raise qa_error.Error("Cluster-verify didn't return these expected"
                           " errors: %s" % utils.CommaJoin(missing))
  else:
    AssertCommand(verify_cmd, fail=fail, node=master)
136 |
|
137 |
|
138 |
# data for testing failures due to bad keys/values for disk parameters:
# unknown template, unknown parameter name, non-numeric value
_FAIL_PARAMS = ["nonexistent:resync-rate=1",
                "drbd:nonexistent=1",
                "drbd:resync-rate=invalid",
                ]
143 |
|
144 |
|
145 |
def TestClusterInitDisk():
  """gnt-cluster init -D"""
  cluster_name = qa_config.get("name")
  # Every entry in _FAIL_PARAMS is invalid, so each init attempt must fail
  for bad_param in _FAIL_PARAMS:
    cmd = ["gnt-cluster", "init", "-D", bad_param, cluster_name]
    AssertCommand(cmd, fail=True)
150 |
|
151 |
|
152 |
def TestClusterInit(rapi_user, rapi_secret):
  """gnt-cluster init

  Creates the RAPI credentials file on the master, initializes the cluster
  with the options found in the QA configuration, and then applies the
  configured hypervisor/backend/OS parameter modifications.

  @type rapi_user: string
  @param rapi_user: Name of the RAPI user to set up
  @type rapi_secret: string
  @param rapi_secret: Password of the RAPI user

  """
  master = qa_config.GetMasterNode()

  rapi_dir = os.path.dirname(pathutils.RAPI_USERS_FILE)

  # First create the RAPI credentials
  fh = tempfile.NamedTemporaryFile()
  try:
    fh.write("%s %s write\n" % (rapi_user, rapi_secret))
    fh.flush()

    tmpru = qa_utils.UploadFile(master["primary"], fh.name)
    try:
      AssertCommand(["mkdir", "-p", rapi_dir])
      AssertCommand(["mv", tmpru, pathutils.RAPI_USERS_FILE])
    finally:
      # Remove the uploaded temporary copy even if the move failed
      AssertCommand(["rm", "-f", tmpru])
  finally:
    fh.close()

  # Initialize cluster
  cmd = [
    "gnt-cluster", "init",
    "--primary-ip-version=%d" % qa_config.get("primary_ip_version", 4),
    "--enabled-hypervisors=%s" % ",".join(qa_config.GetEnabledHypervisors()),
    ]

  # Add any instance specs (min/max/std) configured for QA
  for spec_type in ("mem-size", "disk-size", "disk-count", "cpu-count",
                    "nic-count"):
    for spec_val in ("min", "max", "std"):
      spec = qa_config.get("ispec_%s_%s" %
                           (spec_type.replace("-", "_"), spec_val), None)
      if spec:
        cmd.append("--specs-%s=%s=%d" % (spec_type, spec_val, spec))

  if master.get("secondary", None):
    cmd.append("--secondary-ip=%s" % master["secondary"])

  vgname = qa_config.get("vg-name", None)
  if vgname:
    cmd.append("--vg-name=%s" % vgname)

  master_netdev = qa_config.get("master-netdev", None)
  if master_netdev:
    cmd.append("--master-netdev=%s" % master_netdev)

  nicparams = qa_config.get("default-nicparams", None)
  if nicparams:
    cmd.append("--nic-parameters=%s" %
               ",".join(utils.FormatKeyValue(nicparams)))

  # Cluster value of the exclusive-storage node parameter
  e_s = qa_config.get("exclusive-storage")
  if e_s is not None:
    cmd.extend(["--node-parameters", "exclusive_storage=%s" % e_s])
  else:
    e_s = False
  # Record the effective value so later tests know the cluster state
  qa_config.SetExclusiveStorage(e_s)

  extra_args = qa_config.get("cluster-init-args")
  if extra_args:
    cmd.extend(extra_args)

  cmd.append(qa_config.get("name"))

  AssertCommand(cmd)

  cmd = ["gnt-cluster", "modify"]

  # hypervisor parameter modifications
  hvp = qa_config.get("hypervisor-parameters", {})
  for k, v in hvp.items():
    cmd.extend(["-H", "%s:%s" % (k, v)])
  # backend parameter modifications
  bep = qa_config.get("backend-parameters", "")
  if bep:
    cmd.extend(["-B", bep])

  # Only run "gnt-cluster modify" if any option was actually added above
  if len(cmd) > 2:
    AssertCommand(cmd)

  # OS parameters
  osp = qa_config.get("os-parameters", {})
  for k, v in osp.items():
    AssertCommand(["gnt-os", "modify", "-O", v, k])

  # OS hypervisor parameters
  os_hvp = qa_config.get("os-hvp", {})
  for os_name in os_hvp:
    for hv, hvp in os_hvp[os_name].items():
      AssertCommand(["gnt-os", "modify", "-H", "%s:%s" % (hv, hvp), os_name])
244 |
|
245 |
|
246 |
def TestClusterRename(): |
247 |
"""gnt-cluster rename"""
|
248 |
cmd = ["gnt-cluster", "rename", "-f"] |
249 |
|
250 |
original_name = qa_config.get("name")
|
251 |
rename_target = qa_config.get("rename", None) |
252 |
if rename_target is None: |
253 |
print qa_utils.FormatError('"rename" entry is missing') |
254 |
return
|
255 |
|
256 |
for data in [ |
257 |
cmd + [rename_target], |
258 |
_CLUSTER_VERIFY, |
259 |
cmd + [original_name], |
260 |
_CLUSTER_VERIFY, |
261 |
]: |
262 |
AssertCommand(data) |
263 |
|
264 |
|
265 |
def TestClusterOob():
  """out-of-band framework

  Checks that cluster-verify complains about a non-existing or non-executable
  OOB program and accepts an executable one.

  """
  oob_path_exists = "/tmp/ganeti-qa-oob-does-exist-%s" % utils.NewUUID()

  AssertCommand(_CLUSTER_VERIFY)
  # Point oob_program at a path that does not exist anywhere
  AssertCommand(["gnt-cluster", "modify", "--node-parameters",
                 "oob_program=/tmp/ganeti-qa-oob-does-not-exist-%s" %
                 utils.NewUUID()])

  # Missing OOB program must make cluster-verify fail
  AssertCommand(_CLUSTER_VERIFY, fail=True)

  # Create a real file, but without the executable bit (0400)
  AssertCommand(["touch", oob_path_exists])
  AssertCommand(["chmod", "0400", oob_path_exists])
  AssertCommand(["gnt-cluster", "copyfile", oob_path_exists])

  try:
    AssertCommand(["gnt-cluster", "modify", "--node-parameters",
                   "oob_program=%s" % oob_path_exists])

    # Existing but non-executable program must still fail verification
    AssertCommand(_CLUSTER_VERIFY, fail=True)

    # Make it executable (0500); now verification must pass
    AssertCommand(["chmod", "0500", oob_path_exists])
    AssertCommand(["gnt-cluster", "copyfile", oob_path_exists])

    AssertCommand(_CLUSTER_VERIFY)
  finally:
    # Clean the file up on all nodes
    AssertCommand(["gnt-cluster", "command", "rm", oob_path_exists])

  # Reset oob_program to its default (unset) value
  AssertCommand(["gnt-cluster", "modify", "--node-parameters",
                 "oob_program="])
|
295 |
|
296 |
|
297 |
def TestClusterEpo():
  """gnt-cluster epo

  Exercises emergency-power-off: invalid option combinations, powering off
  the whole cluster and powering it back on again.

  """
  master = qa_config.GetMasterNode()

  # Assert that OOB is unavailable for all nodes
  result_output = GetCommandOutput(master["primary"],
                                   "gnt-node list --verbose --no-headers -o"
                                   " powered")
  AssertEqual(compat.all(powered == "(unavail)"
                         for powered in result_output.splitlines()), True)

  # Conflicting
  AssertCommand(["gnt-cluster", "epo", "--groups", "--all"], fail=True)
  # --all doesn't expect arguments
  AssertCommand(["gnt-cluster", "epo", "--all", "some_arg"], fail=True)

  # Unless --all is given master is not allowed to be in the list
  AssertCommand(["gnt-cluster", "epo", "-f", master["primary"]], fail=True)

  # This shouldn't fail
  AssertCommand(["gnt-cluster", "epo", "-f", "--all"])

  # All instances should have been stopped now
  result_output = GetCommandOutput(master["primary"],
                                   "gnt-instance list --no-headers -o status")
  # ERROR_down because the instance is stopped but not recorded as such
  AssertEqual(compat.all(status == "ERROR_down"
                         for status in result_output.splitlines()), True)

  # Now start everything again
  AssertCommand(["gnt-cluster", "epo", "--on", "-f", "--all"])

  # All instances should have been started now
  result_output = GetCommandOutput(master["primary"],
                                   "gnt-instance list --no-headers -o status")
  AssertEqual(compat.all(status == "running"
                         for status in result_output.splitlines()), True)
334 |
|
335 |
|
336 |
def TestClusterVerify():
  """gnt-cluster verify"""
  # Verify both the configuration and the disks
  for cmd in (_CLUSTER_VERIFY, ["gnt-cluster", "verify-disks"]):
    AssertCommand(cmd)
340 |
|
341 |
|
342 |
def TestJobqueue():
  """gnt-debug test-jobqueue"""
  cmd = ["gnt-debug", "test-jobqueue"]
  AssertCommand(cmd)
345 |
|
346 |
|
347 |
def TestDelay(node):
  """gnt-debug delay"""
  base_cmd = ["gnt-debug", "delay"]
  # Delay on the master, then without the master, then without the master
  # but on an explicitly named node
  for extra_opts in ([],
                     ["--no-master"],
                     ["--no-master", "-n", node["primary"]]):
    AssertCommand(base_cmd + extra_opts + ["1"])
353 |
|
354 |
|
355 |
def TestClusterReservedLvs():
  """gnt-cluster reserved lvs

  Creates a stray LV and checks that cluster-verify flags it as orphaned
  unless it is covered by the reserved-lvs setting (exact name or regexp).

  """
  vgname = qa_config.get("vg-name", constants.DEFAULT_VG)
  lvname = _QA_LV_PREFIX + "test"
  lvfullname = "/".join([vgname, lvname])
  # Each entry is (expected_failure, command); order matters, as each step
  # builds on the cluster state left by the previous one
  for fail, cmd in [
    (False, _CLUSTER_VERIFY),
    (False, ["gnt-cluster", "modify", "--reserved-lvs", ""]),
    (False, ["lvcreate", "-L1G", "-n", lvname, vgname]),
    # Stray LV, no reservation: verify must fail
    (True, _CLUSTER_VERIFY),
    # Reserve it by exact name: verify passes again
    (False, ["gnt-cluster", "modify", "--reserved-lvs",
             "%s,.*/other-test" % lvfullname]),
    (False, _CLUSTER_VERIFY),
    # Reserve it via a regexp on the QA prefix
    (False, ["gnt-cluster", "modify", "--reserved-lvs",
             ".*/%s.*" % _QA_LV_PREFIX]),
    (False, _CLUSTER_VERIFY),
    # Drop the reservation: verify fails once more
    (False, ["gnt-cluster", "modify", "--reserved-lvs", ""]),
    (True, _CLUSTER_VERIFY),
    # Remove the LV and verify a clean cluster
    (False, ["lvremove", "-f", lvfullname]),
    (False, _CLUSTER_VERIFY),
    ]:
    AssertCommand(cmd, fail=fail)
377 |
|
378 |
|
379 |
def TestClusterModifyEmpty():
  """gnt-cluster modify"""
  # Requesting no modification at all must be rejected
  cmd = ["gnt-cluster", "modify"]
  AssertCommand(cmd, fail=True)
382 |
|
383 |
|
384 |
def TestClusterModifyDisk():
  """gnt-cluster modify -D"""
  # Every entry in _FAIL_PARAMS is invalid, so each modify must fail
  for bad_param in _FAIL_PARAMS:
    cmd = ["gnt-cluster", "modify", "-D", bad_param]
    AssertCommand(cmd, fail=True)
388 |
|
389 |
|
390 |
def TestClusterModifyBe():
  """gnt-cluster modify -B

  Sets backend parameters (memory, vcpus, auto_balance) and checks via
  "gnt-cluster info" that each change (or rejected change) leaves the
  expected value in place.

  """
  # Each entry is (expected_failure, command); the grep commands check that
  # the previously set value is (still) in effect
  for fail, cmd in [
    # max/min mem
    (False, ["gnt-cluster", "modify", "-B", "maxmem=256"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *maxmem: 256$'"]),
    (False, ["gnt-cluster", "modify", "-B", "minmem=256"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *minmem: 256$'"]),
    # Non-numeric values must be rejected and leave the old value in place
    (True, ["gnt-cluster", "modify", "-B", "maxmem=a"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *maxmem: 256$'"]),
    (True, ["gnt-cluster", "modify", "-B", "minmem=a"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *minmem: 256$'"]),
    (False, ["gnt-cluster", "modify", "-B", "maxmem=128,minmem=128"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *maxmem: 128$'"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *minmem: 128$'"]),
    # vcpus
    (False, ["gnt-cluster", "modify", "-B", "vcpus=4"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *vcpus: 4$'"]),
    (True, ["gnt-cluster", "modify", "-B", "vcpus=a"]),
    (False, ["gnt-cluster", "modify", "-B", "vcpus=1"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *vcpus: 1$'"]),
    # auto_balance
    (False, ["gnt-cluster", "modify", "-B", "auto_balance=False"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *auto_balance: False$'"]),
    # auto_balance is a boolean; "1" is not accepted
    (True, ["gnt-cluster", "modify", "-B", "auto_balance=1"]),
    (False, ["gnt-cluster", "modify", "-B", "auto_balance=True"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *auto_balance: True$'"]),
    ]:
    AssertCommand(cmd, fail=fail)

  # redo the original-requested BE parameters, if any
  bep = qa_config.get("backend-parameters", "")
  if bep:
    AssertCommand(["gnt-cluster", "modify", "-B", bep])
424 |
|
425 |
|
426 |
# Start of the instance-policy section in "gnt-cluster info" output; the
# captured indentation is used to detect where the section ends
_START_IPOLICY_RE = re.compile(r"^(\s*)Instance policy")
# Header of one instance-spec sub-section (std/min/max)
_START_ISPEC_RE = re.compile(r"^\s+-\s+(std|min|max)")
# Common "name: value" tail used by the two REs below
_VALUE_RE = r"([^\s:][^:]*):\s+(\S.*)$"
# Policy parameter line ("- name: value")
_IPOLICY_PARAM_RE = re.compile(r"^\s+-\s+" + _VALUE_RE)
# Spec value line inside a std/min/max sub-section ("name: value")
_ISPEC_VALUE_RE = re.compile(r"^\s+" + _VALUE_RE)
|
431 |
|
432 |
|
433 |
def _GetClusterIPolicy():
  """Return the run-time values of the cluster-level instance policy.

  Parses the "Instance policy" section of "gnt-cluster info" output.

  @rtype: tuple
  @return: (policy, specs), where:
      - policy is a dictionary of the policy values, instance specs excluded
      - specs is dict of dict, specs[par][key] is a spec value, where key is
        "min", "max", or "std"

  """
  mnode = qa_config.GetMasterNode()
  info = GetCommandOutput(mnode["primary"], "gnt-cluster info")
  inside_policy = False
  end_ispec_re = None
  curr_spec = ""
  specs = {}
  policy = {}
  for line in info.splitlines():
    if inside_policy:
      # The order of the matching is important, as some REs overlap
      m = _START_ISPEC_RE.match(line)
      if m:
        curr_spec = m.group(1)
        continue
      m = _IPOLICY_PARAM_RE.match(line)
      if m:
        policy[m.group(1)] = m.group(2).strip()
        continue
      m = _ISPEC_VALUE_RE.match(line)
      if m:
        assert curr_spec
        par = m.group(1)
        if par == "memory-size":
          par = "mem-size"
        d = specs.setdefault(par, {})
        d[curr_spec] = m.group(2).strip()
        continue
      assert end_ispec_re is not None
      if end_ispec_re.match(line):
        inside_policy = False
    else:
      m = _START_IPOLICY_RE.match(line)
      if m:
        inside_policy = True
        # We stop parsing when we find the same indentation level
        re_str = r"^\s{%s}\S" % len(m.group(1))
        end_ispec_re = re.compile(re_str)
  # Sanity checks
  assert len(specs) > 0
  # FIX: the old code built a generator expression and asserted on the
  # generator object itself (always true), and it iterated over the dict
  # *keys*, so '"min" in d' was a substring test on the parameter name.
  # Check the spec dictionaries instead.
  assert compat.all("min" in d and "std" in d and "max" in d
                    for d in specs.values()), \
      "Missing item in specs: %s" % specs
  assert len(policy) > 0
  return (policy, specs)
|
486 |
|
487 |
|
488 |
def TestClusterModifyIPolicy():
  """gnt-cluster modify --ipolicy-*

  For each tested policy parameter, applies a series of valid and invalid
  values and checks after every attempt that only the targeted parameter
  changed and that specs stayed untouched.

  """
  basecmd = ["gnt-cluster", "modify"]
  (old_policy, old_specs) = _GetClusterIPolicy()
  for par in ["vcpu-ratio", "spindle-ratio"]:
    curr_val = float(old_policy[par])
    # (expected_success, value) pairs; invalid values must be rejected and
    # leave the current value in place
    test_values = [
      (True, 1.0),
      (True, 1.5),
      (True, 2),
      (False, "a"),
      # Restore the old value
      (True, curr_val),
      ]
    for (good, val) in test_values:
      cmd = basecmd + ["--ipolicy-%s=%s" % (par, val)]
      AssertCommand(cmd, fail=not good)
      if good:
        curr_val = val
      # Check the affected parameter
      (eff_policy, eff_specs) = _GetClusterIPolicy()
      AssertEqual(float(eff_policy[par]), curr_val)
      # Check everything else
      AssertEqual(eff_specs, old_specs)
      for p in eff_policy.keys():
        if p == par:
          continue
        AssertEqual(eff_policy[p], old_policy[p])

  # Disk templates are treated slightly differently
  par = "disk-templates"
  disp_str = "enabled disk templates"
  curr_val = old_policy[disp_str]
  test_values = [
    (True, constants.DT_PLAIN),
    (True, "%s,%s" % (constants.DT_PLAIN, constants.DT_DRBD8)),
    (False, "thisisnotadisktemplate"),
    (False, ""),
    # Restore the old value
    (True, curr_val.replace(" ", "")),
    ]
  for (good, val) in test_values:
    cmd = basecmd + ["--ipolicy-%s=%s" % (par, val)]
    AssertCommand(cmd, fail=not good)
    if good:
      curr_val = val
    # Check the affected parameter; whitespace is stripped because the
    # displayed list is comma-space separated
    (eff_policy, eff_specs) = _GetClusterIPolicy()
    AssertEqual(eff_policy[disp_str].replace(" ", ""), curr_val)
    # Check everything else
    AssertEqual(eff_specs, old_specs)
    for p in eff_policy.keys():
      if p == disp_str:
        continue
      AssertEqual(eff_policy[p], old_policy[p])
543 |
|
544 |
|
545 |
def TestClusterSetISpecs(new_specs, fail=False, old_values=None):
  """Change instance specs.

  @type new_specs: dict of dict
  @param new_specs: new_specs[par][key], where key is "min", "max", "std". It
      can be an empty dictionary.
  @type fail: bool
  @param fail: if the change is expected to fail
  @type old_values: tuple
  @param old_values: (old_policy, old_specs), as returned by
      L{_GetClusterIPolicy}
  @return: same as L{_GetClusterIPolicy}

  """
  if old_values:
    (old_policy, old_specs) = old_values
  else:
    (old_policy, old_specs) = _GetClusterIPolicy()
  if new_specs:
    cmd = ["gnt-cluster", "modify"]
    for (par, keyvals) in new_specs.items():
      if par == "spindle-use":
        # ignore spindle-use, which is not settable
        continue
      cmd += [
        "--specs-%s" % par,
        ",".join(["%s=%s" % (k, v) for (k, v) in keyvals.items()]),
        ]
    AssertCommand(cmd, fail=fail)
  # Check the new state
  (eff_policy, eff_specs) = _GetClusterIPolicy()
  # Policy values other than specs must never change here
  AssertEqual(eff_policy, old_policy)
  if fail:
    # A failed modify must leave all specs untouched
    AssertEqual(eff_specs, old_specs)
  else:
    # Values present in new_specs must have been applied; everything else
    # must have kept its previous value
    for par in eff_specs:
      for key in eff_specs[par]:
        if par in new_specs and key in new_specs[par]:
          AssertEqual(int(eff_specs[par][key]), int(new_specs[par][key]))
        else:
          AssertEqual(int(eff_specs[par][key]), int(old_specs[par][key]))
  return (eff_policy, eff_specs)
|
587 |
|
588 |
|
589 |
def TestClusterModifyISpecs():
  """gnt-cluster modify --specs-*

  For every spec parameter, tries a series of (min, std, max) triples;
  valid triples (min <= std <= max) must be accepted, inconsistent or
  non-numeric ones rejected.

  """
  params = ["mem-size", "disk-size", "disk-count", "cpu-count", "nic-count"]
  (cur_policy, cur_specs) = _GetClusterIPolicy()
  for par in params:
    # Each entry is (expected_success, min, std, max)
    test_values = [
      (True, 0, 4, 12),
      (True, 4, 4, 12),
      (True, 4, 12, 12),
      (True, 4, 4, 4),
      (False, 4, 0, 12),
      (False, 4, 16, 12),
      (False, 4, 4, 0),
      (False, 12, 4, 4),
      (False, 12, 4, 0),
      (False, "a", 4, 12),
      (False, 0, "a", 12),
      (False, 0, 4, "a"),
      # This is to restore the old values
      (True,
       cur_specs[par]["min"], cur_specs[par]["std"], cur_specs[par]["max"])
      ]
    for (good, mn, st, mx) in test_values:
      new_vals = {par: {"min": str(mn), "std": str(st), "max": str(mx)}}
      cur_state = (cur_policy, cur_specs)
      # We update cur_specs, as we've copied the values to restore already
      (cur_policy, cur_specs) = TestClusterSetISpecs(new_vals, fail=not good,
                                                     old_values=cur_state)
617 |
|
618 |
|
619 |
def TestClusterInfo():
  """gnt-cluster info"""
  cmd = ["gnt-cluster", "info"]
  AssertCommand(cmd)
622 |
|
623 |
|
624 |
def TestClusterRedistConf():
  """gnt-cluster redist-conf"""
  cmd = ["gnt-cluster", "redist-conf"]
  AssertCommand(cmd)
627 |
|
628 |
|
629 |
def TestClusterGetmaster():
  """gnt-cluster getmaster"""
  cmd = ["gnt-cluster", "getmaster"]
  AssertCommand(cmd)
632 |
|
633 |
|
634 |
def TestClusterVersion():
  """gnt-cluster version"""
  cmd = ["gnt-cluster", "version"]
  AssertCommand(cmd)
637 |
|
638 |
|
639 |
def TestClusterRenewCrypto():
  """gnt-cluster renew-crypto

  Tests rejected option combinations, installing custom RAPI certificate and
  cluster domain secret, renewing everything, and finally restoring the
  original RAPI certificate.

  """
  master = qa_config.GetMasterNode()

  # Conflicting options
  cmd = ["gnt-cluster", "renew-crypto", "--force",
         "--new-cluster-certificate", "--new-confd-hmac-key"]
  conflicting = [
    ["--new-rapi-certificate", "--rapi-certificate=/dev/null"],
    ["--new-cluster-domain-secret", "--cluster-domain-secret=/dev/null"],
    ]
  for i in conflicting:
    AssertCommand(cmd + i, fail=True)

  # Invalid RAPI certificate
  cmd = ["gnt-cluster", "renew-crypto", "--force",
         "--rapi-certificate=/dev/null"]
  AssertCommand(cmd, fail=True)

  # Keep a backup so the original certificate can be restored at the end
  rapi_cert_backup = qa_utils.BackupFile(master["primary"],
                                         pathutils.RAPI_CERT_FILE)
  try:
    # Custom RAPI certificate
    fh = tempfile.NamedTemporaryFile()

    # Ensure certificate doesn't cause "gnt-cluster verify" to complain
    validity = constants.SSL_CERT_EXPIRATION_WARN * 3

    utils.GenerateSelfSignedSslCert(fh.name, validity=validity)

    tmpcert = qa_utils.UploadFile(master["primary"], fh.name)
    try:
      AssertCommand(["gnt-cluster", "renew-crypto", "--force",
                     "--rapi-certificate=%s" % tmpcert])
    finally:
      AssertCommand(["rm", "-f", tmpcert])

    # Custom cluster domain secret
    cds_fh = tempfile.NamedTemporaryFile()
    cds_fh.write(utils.GenerateSecret())
    cds_fh.write("\n")
    cds_fh.flush()

    tmpcds = qa_utils.UploadFile(master["primary"], cds_fh.name)
    try:
      AssertCommand(["gnt-cluster", "renew-crypto", "--force",
                     "--cluster-domain-secret=%s" % tmpcds])
    finally:
      AssertCommand(["rm", "-f", tmpcds])

    # Normal case
    AssertCommand(["gnt-cluster", "renew-crypto", "--force",
                   "--new-cluster-certificate", "--new-confd-hmac-key",
                   "--new-rapi-certificate", "--new-cluster-domain-secret"])

    # Restore RAPI certificate
    AssertCommand(["gnt-cluster", "renew-crypto", "--force",
                   "--rapi-certificate=%s" % rapi_cert_backup])
  finally:
    AssertCommand(["rm", "-f", rapi_cert_backup])
699 |
|
700 |
|
701 |
def TestClusterBurnin():
  """Burnin

  Uploads the burnin tool to the master and runs it over as many QA
  instances as can be acquired, with options taken from the QA
  configuration.

  """
  master = qa_config.GetMasterNode()

  options = qa_config.get("options", {})
  disk_template = options.get("burnin-disk-template", "drbd")
  parallel = options.get("burnin-in-parallel", False)
  check_inst = options.get("burnin-check-instances", False)
  do_rename = options.get("burnin-rename", "")
  do_reboot = options.get("burnin-reboot", True)
  reboot_types = options.get("reboot-types", constants.REBOOT_TYPES)

  # Get as many instances as we need
  instances = []
  try:
    try:
      num = qa_config.get("options", {}).get("burnin-instances", 1)
      for _ in range(0, num):
        instances.append(qa_config.AcquireInstance())
    except qa_error.OutOfInstancesError:
      # Best-effort: run with however many instances were acquired
      print "Not enough instances, continuing anyway."

    if len(instances) < 1:
      raise qa_error.Error("Burnin needs at least one instance")

    script = qa_utils.UploadFile(master["primary"], "../tools/burnin")
    try:
      # Run burnin
      cmd = [script,
             "--os=%s" % qa_config.get("os"),
             "--minmem-size=%s" % qa_config.get(constants.BE_MINMEM),
             "--maxmem-size=%s" % qa_config.get(constants.BE_MAXMEM),
             "--disk-size=%s" % ",".join(qa_config.get("disk")),
             "--disk-growth=%s" % ",".join(qa_config.get("disk-growth")),
             "--disk-template=%s" % disk_template]
      if parallel:
        cmd.append("--parallel")
        cmd.append("--early-release")
      if check_inst:
        cmd.append("--http-check")
      if do_rename:
        cmd.append("--rename=%s" % do_rename)
      if not do_reboot:
        cmd.append("--no-reboot")
      else:
        cmd.append("--reboot-types=%s" % ",".join(reboot_types))
      cmd += [inst["name"] for inst in instances]
      AssertCommand(cmd)
    finally:
      # Remove the uploaded burnin script from the master
      AssertCommand(["rm", "-f", script])

  finally:
    # Always give the acquired instances back to the QA pool
    for inst in instances:
      qa_config.ReleaseInstance(inst)
755 |
|
756 |
|
757 |
def TestClusterMasterFailover():
  """gnt-cluster master-failover"""
  current_master = qa_config.GetMasterNode()
  new_master = qa_config.AcquireNode(exclude=current_master)

  failover_cmd = ["gnt-cluster", "master-failover"]
  try:
    # Fail over to the acquired node, then fail back to the original master
    AssertCommand(failover_cmd, node=new_master)
    AssertCommand(failover_cmd, node=current_master)
  finally:
    qa_config.ReleaseNode(new_master)
769 |
|
770 |
|
771 |
def TestClusterMasterFailoverWithDrainedQueue():
  """gnt-cluster master-failover with drained queue

  Drains the job queue on the failover master, fails over to it, and checks
  that the drain flag is removed by the failover on both nodes.

  """
  # Shell test for the presence of the queue-drain flag file
  drain_check = ["test", "-f", pathutils.JOB_QUEUE_DRAIN_FILE]

  master = qa_config.GetMasterNode()
  failovermaster = qa_config.AcquireNode(exclude=master)

  # Ensure queue is not drained
  for node in [master, failovermaster]:
    AssertCommand(drain_check, node=node, fail=True)

  # Drain queue on failover master
  AssertCommand(["touch", pathutils.JOB_QUEUE_DRAIN_FILE], node=failovermaster)

  cmd = ["gnt-cluster", "master-failover"]
  try:
    AssertCommand(drain_check, node=failovermaster)
    AssertCommand(cmd, node=failovermaster)
    # Failover must have un-drained the queue on both nodes
    AssertCommand(drain_check, fail=True)
    AssertCommand(drain_check, node=failovermaster, fail=True)

    # Back to original master node
    AssertCommand(cmd, node=master)
  finally:
    qa_config.ReleaseNode(failovermaster)

  # The queue must still be un-drained after failing back
  AssertCommand(drain_check, fail=True)
  AssertCommand(drain_check, node=failovermaster, fail=True)
|
799 |
|
800 |
|
801 |
def TestClusterCopyfile():
  """gnt-cluster copyfile

  Uploads a file with unique content to the master, distributes it with
  "gnt-cluster copyfile" and verifies it on all nodes.

  """
  master = qa_config.GetMasterNode()

  uniqueid = utils.NewUUID()

  # Create temporary file
  f = tempfile.NamedTemporaryFile()
  try:
    f.write(uniqueid)
    f.flush()
    f.seek(0)

    # Upload file to master node
    testname = qa_utils.UploadFile(master["primary"], f.name)
  finally:
    # FIX: close (and thereby delete) the local temporary file explicitly
    # instead of relying on garbage collection; same pattern as
    # TestClusterInit uses for its credentials file
    f.close()

  try:
    # Copy file to all nodes
    AssertCommand(["gnt-cluster", "copyfile", testname])
    _CheckFileOnAllNodes(testname, uniqueid)
  finally:
    _RemoveFileFromAllNodes(testname)
821 |
|
822 |
|
823 |
def TestClusterCommand():
  """gnt-cluster command"""
  uniqueid = utils.NewUUID()
  rfile = "/tmp/gnt%s" % utils.NewUUID()
  # Remote shell snippet writing the unique id into rfile on every node
  rcmd = utils.ShellQuoteArgs(["echo", "-n", uniqueid])
  cmd = utils.ShellQuoteArgs(["gnt-cluster", "command",
                              "%s >%s" % (rcmd, rfile)])

  try:
    AssertCommand(cmd)
    # Each node must now contain exactly the unique id
    _CheckFileOnAllNodes(rfile, uniqueid)
  finally:
    _RemoveFileFromAllNodes(rfile)
836 |
|
837 |
|
838 |
def TestClusterDestroy():
  """gnt-cluster destroy"""
  cmd = ["gnt-cluster", "destroy", "--yes-do-it"]
  AssertCommand(cmd)
841 |
|
842 |
|
843 |
def TestClusterRepairDiskSizes():
  """gnt-cluster repair-disk-sizes"""
  cmd = ["gnt-cluster", "repair-disk-sizes"]
  AssertCommand(cmd)
846 |
|
847 |
|
848 |
def TestSetExclStorCluster(newvalue):
  """Set the exclusive_storage node parameter at the cluster level.

  @type newvalue: bool
  @param newvalue: New value of exclusive_storage
  @rtype: bool
  @return: The old value of exclusive_storage

  """
  previous = _GetBoolClusterField("exclusive_storage")
  AssertCommand(["gnt-cluster", "modify", "--node-parameters",
                 "exclusive_storage=%s" % newvalue])
  # Re-read the field to make sure the modification really took effect
  actual = _GetBoolClusterField("exclusive_storage")
  if actual != newvalue:
    raise qa_error.Error("exclusive_storage has the wrong value: %s instead"
                         " of %s" % (actual, newvalue))
  qa_config.SetExclusiveStorage(newvalue)
  return previous
|
866 |
|
867 |
|
868 |
def TestExclStorSharedPv(node):
  """cluster-verify reports LVs that share the same PV with exclusive_storage.

  @param node: node object on which the stray logical volumes are created

  """
  vgname = qa_config.get("vg-name", constants.DEFAULT_VG)
  lvname1 = _QA_LV_PREFIX + "vol1"
  lvname2 = _QA_LV_PREFIX + "vol2"
  node_name = node["primary"]
  # A single stray LV is only reported as orphaned
  AssertCommand(["lvcreate", "-L1G", "-n", lvname1, vgname], node=node_name)
  AssertClusterVerify(fail=True, errors=[constants.CV_ENODEORPHANLV])
  # A second LV on the same PV additionally triggers the LVM error with
  # exclusive_storage enabled
  AssertCommand(["lvcreate", "-L1G", "-n", lvname2, vgname], node=node_name)
  AssertClusterVerify(fail=True, errors=[constants.CV_ENODELVM,
                                         constants.CV_ENODEORPHANLV])
  # Clean up; verification must pass again afterwards
  AssertCommand(["lvremove", "-f", "/".join([vgname, lvname1])], node=node_name)
  AssertCommand(["lvremove", "-f", "/".join([vgname, lvname2])], node=node_name)
  AssertClusterVerify()