root / qa / qa_cluster.py @ 5949c31c
History | View | Annotate | Download (36.4 kB)
1 |
#
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2007, 2010, 2011, 2012, 2013 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
|
22 |
"""Cluster related QA tests.
|
23 |
|
24 |
"""
|
25 |
|
26 |
import re |
27 |
import tempfile |
28 |
import os.path |
29 |
|
30 |
from ganeti import constants |
31 |
from ganeti import compat |
32 |
from ganeti import utils |
33 |
from ganeti import pathutils |
34 |
|
35 |
import qa_config |
36 |
import qa_utils |
37 |
import qa_error |
38 |
import qa_instance |
39 |
|
40 |
from qa_utils import AssertEqual, AssertCommand, GetCommandOutput |
41 |
|
42 |
|
43 |
# Prefix for LVM volumes created by QA code during tests; also used below
# (TestClusterReservedLvs) to build the reserved-lvs regular expression
_QA_LV_PREFIX = "qa-"

#: cluster verify command (base argv, shared by several tests in this module)
_CLUSTER_VERIFY = ["gnt-cluster", "verify"]
48 |
|
49 |
|
50 |
def _RemoveFileFromAllNodes(filename):
  """Removes a file from all nodes.

  @type filename: string
  @param filename: path of the file to delete on every configured node

  """
  for target in qa_config.get("nodes"):
    AssertCommand(["rm", "-f", filename], node=target)
56 |
|
57 |
|
58 |
def _CheckFileOnAllNodes(filename, content):
  """Verifies the content of the given file on all nodes.

  @type filename: string
  @param filename: path of the file to read on every configured node
  @type content: string
  @param content: expected file content

  """
  read_cmd = utils.ShellQuoteArgs(["cat", filename])
  for target in qa_config.get("nodes"):
    actual = qa_utils.GetCommandOutput(target.primary, read_cmd)
    AssertEqual(actual, content)
65 |
|
66 |
|
67 |
def _GetClusterField(field_path):
  """Get the value of a cluster field.

  @type field_path: list of strings
  @param field_path: Names of the groups/fields to navigate to get the desired
      value, e.g. C{["Default node parameters", "oob_program"]}
  @return: The effective value of the field (the actual type depends on the
      chosen field)

  """
  assert isinstance(field_path, list)
  assert field_path
  # Walk the nested "gnt-cluster info" structure one key at a time
  info = qa_utils.GetObjectInfo(["gnt-cluster", "info"])
  for component in field_path:
    info = info[component]
  return info
|
83 |
|
84 |
|
85 |
# Cluster-verify errors (date, "ERROR", then error code)
|
86 |
_CVERROR_RE = re.compile(r"^[\w\s:]+\s+- (ERROR|WARNING):([A-Z0-9_-]+):")
|
87 |
|
88 |
|
89 |
def _GetCVErrorCodes(cvout): |
90 |
errs = set()
|
91 |
warns = set()
|
92 |
for l in cvout.splitlines(): |
93 |
m = _CVERROR_RE.match(l) |
94 |
if m:
|
95 |
etype = m.group(1)
|
96 |
ecode = m.group(2)
|
97 |
if etype == "ERROR": |
98 |
errs.add(ecode) |
99 |
elif etype == "WARNING": |
100 |
warns.add(ecode) |
101 |
return (errs, warns)
|
102 |
|
103 |
|
104 |
def _CheckVerifyErrors(actual, expected, etype):
  """Checks that all expected cluster-verify codes were reported.

  @param actual: set of codes actually returned by cluster-verify
  @param expected: list of tuples whose second element is a code that must
      be present in C{actual}
  @param etype: item type for the error message ("error" or "warning")
  @raise qa_error.Error: if any expected code is missing from C{actual}

  """
  wanted = compat.UniqueFrozenset(code for (_, code, _) in expected)
  missing = wanted.difference(actual)
  if missing:
    raise qa_error.Error("Cluster-verify didn't return these expected"
                         " %ss: %s" % (etype, utils.CommaJoin(missing)))
|
110 |
|
111 |
|
112 |
def AssertClusterVerify(fail=False, errors=None, warnings=None):
  """Run cluster-verify and check the result

  @type fail: bool
  @param fail: if cluster-verify is expected to fail instead of succeeding
  @type errors: list of tuples
  @param errors: List of CV_XXX errors that are expected; if specified, all the
      errors listed must appear in cluster-verify output. A non-empty value
      implies C{fail=True}.
  @type warnings: list of tuples
  @param warnings: Same as C{errors} but for warnings.

  """
  cvcmd = "gnt-cluster verify"
  mnode = qa_config.GetMasterNode()
  if errors or warnings:
    # Expected errors imply a failing command; pass a real boolean instead
    # of the (possibly non-empty) "errors" list or None itself
    cvout = GetCommandOutput(mnode.primary, cvcmd + " --error-codes",
                             fail=(fail or bool(errors)))
    (act_errs, act_warns) = _GetCVErrorCodes(cvout)
    if errors:
      _CheckVerifyErrors(act_errs, errors, "error")
    if warnings:
      _CheckVerifyErrors(act_warns, warnings, "warning")
  else:
    # Nothing specific expected; just check the exit status
    AssertCommand(cvcmd, fail=fail, node=mnode)
137 |
|
138 |
|
139 |
# data for testing failures due to bad keys/values for disk parameters
# (shared by TestClusterInitDisk and TestClusterModifyDisk)
_FAIL_PARAMS = ["nonexistent:resync-rate=1",  # unknown disk template
                "drbd:nonexistent=1",  # unknown parameter name
                "drbd:resync-rate=invalid",  # non-numeric value
                ]
144 |
|
145 |
|
146 |
def TestClusterInitDisk():
  """gnt-cluster init -D"""
  cluster_name = qa_config.get("name")
  # Every entry in _FAIL_PARAMS is an invalid disk parameter and must be
  # rejected at init time
  for bad_param in _FAIL_PARAMS:
    AssertCommand(["gnt-cluster", "init", "-D", bad_param, cluster_name],
                  fail=True)
151 |
|
152 |
|
153 |
def TestClusterInit(rapi_user, rapi_secret):
  """gnt-cluster init

  Uploads the RAPI credentials to the master node, builds the
  "gnt-cluster init" command line from the QA configuration and runs it,
  then applies the configured hypervisor/backend/OS parameter changes.

  @type rapi_user: string
  @param rapi_user: name of the RAPI user written to the credentials file
  @type rapi_secret: string
  @param rapi_secret: secret of the RAPI user

  """
  master = qa_config.GetMasterNode()

  rapi_users_path = qa_utils.MakeNodePath(master, pathutils.RAPI_USERS_FILE)
  rapi_dir = os.path.dirname(rapi_users_path)

  # First create the RAPI credentials
  fh = tempfile.NamedTemporaryFile()
  try:
    fh.write("%s %s write\n" % (rapi_user, rapi_secret))
    fh.flush()

    # Copy the temporary file to the master and move it into place;
    # the uploaded copy is removed even if the move fails
    tmpru = qa_utils.UploadFile(master.primary, fh.name)
    try:
      AssertCommand(["mkdir", "-p", rapi_dir])
      AssertCommand(["mv", tmpru, rapi_users_path])
    finally:
      AssertCommand(["rm", "-f", tmpru])
  finally:
    fh.close()

  # Initialize cluster
  cmd = [
    "gnt-cluster", "init",
    "--primary-ip-version=%d" % qa_config.get("primary_ip_version", 4),
    "--enabled-hypervisors=%s" % ",".join(qa_config.GetEnabledHypervisors()),
    "--enabled-disk-templates=%s" %
      ",".join(qa_config.GetEnabledDiskTemplates()),
    "--file-storage-dir=%s" %
      qa_config.get("file-storage-dir", pathutils.DEFAULT_FILE_STORAGE_DIR),
    ]

  # Optional instance-spec bounds ("ispec_<type>_<min|max|std>" config keys)
  for spec_type in ("mem-size", "disk-size", "disk-count", "cpu-count",
                    "nic-count"):
    for spec_val in ("min", "max", "std"):
      spec = qa_config.get("ispec_%s_%s" %
                           (spec_type.replace("-", "_"), spec_val), None)
      if spec is not None:
        cmd.append("--specs-%s=%s=%d" % (spec_type, spec_val, spec))

  if master.secondary:
    cmd.append("--secondary-ip=%s" % master.secondary)

  # LVM-based disk templates require a volume group name
  if utils.IsLvmEnabled(qa_config.GetEnabledDiskTemplates()):
    vgname = qa_config.get("vg-name", constants.DEFAULT_VG)
    if vgname:
      cmd.append("--vg-name=%s" % vgname)
    else:
      raise qa_error.Error("Please specify a volume group if you enable"
                           " lvm-based disk templates in the QA.")

  master_netdev = qa_config.get("master-netdev", None)
  if master_netdev:
    cmd.append("--master-netdev=%s" % master_netdev)

  nicparams = qa_config.get("default-nicparams", None)
  if nicparams:
    cmd.append("--nic-parameters=%s" %
               ",".join(utils.FormatKeyValue(nicparams)))

  # Cluster value of the exclusive-storage node parameter
  e_s = qa_config.get("exclusive-storage")
  if e_s is not None:
    cmd.extend(["--node-parameters", "exclusive_storage=%s" % e_s])
  else:
    e_s = False
  # Remember the effective value so later tests know the cluster setting
  qa_config.SetExclusiveStorage(e_s)

  extra_args = qa_config.get("cluster-init-args")
  if extra_args:
    cmd.extend(extra_args)

  # The cluster name is the final positional argument
  cmd.append(qa_config.get("name"))

  AssertCommand(cmd)

  cmd = ["gnt-cluster", "modify"]

  # hypervisor parameter modifications
  hvp = qa_config.get("hypervisor-parameters", {})
  for k, v in hvp.items():
    cmd.extend(["-H", "%s:%s" % (k, v)])
  # backend parameter modifications
  bep = qa_config.get("backend-parameters", "")
  if bep:
    cmd.extend(["-B", bep])

  # Only run "gnt-cluster modify" if any option was actually added above
  if len(cmd) > 2:
    AssertCommand(cmd)

  # OS parameters
  osp = qa_config.get("os-parameters", {})
  for k, v in osp.items():
    AssertCommand(["gnt-os", "modify", "-O", v, k])

  # OS hypervisor parameters
  os_hvp = qa_config.get("os-hvp", {})
  for os_name in os_hvp:
    for hv, hvp in os_hvp[os_name].items():
      AssertCommand(["gnt-os", "modify", "-H", "%s:%s" % (hv, hvp), os_name])
254 |
|
255 |
|
256 |
def TestClusterRename():
  """gnt-cluster rename"""
  rename_cmd = ["gnt-cluster", "rename", "-f"]

  original_name = qa_config.get("name")
  rename_target = qa_config.get("rename", None)
  if rename_target is None:
    print(qa_utils.FormatError('"rename" entry is missing'))
    return

  # Rename to the target, verify, rename back, verify again
  for step in [
    rename_cmd + [rename_target],
    _CLUSTER_VERIFY,
    rename_cmd + [original_name],
    _CLUSTER_VERIFY,
    ]:
    AssertCommand(step)
273 |
|
274 |
|
275 |
def TestClusterOob():
  """out-of-band framework"""
  def _SetOobProgram(path):
    # Set the cluster-level default of the "oob_program" node parameter
    AssertCommand(["gnt-cluster", "modify", "--node-parameters",
                   "oob_program=%s" % path])

  oob_path = "/tmp/ganeti-qa-oob-does-exist-%s" % utils.NewUUID()

  AssertCommand(_CLUSTER_VERIFY)
  # A non-existing OOB helper must make cluster-verify fail
  _SetOobProgram("/tmp/ganeti-qa-oob-does-not-exist-%s" % utils.NewUUID())

  AssertCommand(_CLUSTER_VERIFY, fail=True)

  # Create a real helper file, but without the executable bit set
  AssertCommand(["touch", oob_path])
  AssertCommand(["chmod", "0400", oob_path])
  AssertCommand(["gnt-cluster", "copyfile", oob_path])

  try:
    _SetOobProgram(oob_path)

    # Still not executable, so verification must keep failing
    AssertCommand(_CLUSTER_VERIFY, fail=True)

    # Make it executable and distribute it again
    AssertCommand(["chmod", "0500", oob_path])
    AssertCommand(["gnt-cluster", "copyfile", oob_path])

    AssertCommand(_CLUSTER_VERIFY)
  finally:
    AssertCommand(["gnt-cluster", "command", "rm", oob_path])

  # Reset the parameter to its default (empty) value
  _SetOobProgram("")
|
305 |
|
306 |
|
307 |
def TestClusterEpo():
  """gnt-cluster epo"""
  master = qa_config.GetMasterNode()

  def _AssertAllLinesEqual(list_cmd, expected):
    # Run a list command on the master and check that every output line
    # equals the expected value
    output = GetCommandOutput(master.primary, list_cmd)
    AssertEqual(compat.all(line == expected
                           for line in output.splitlines()), True)

  # Assert that OOB is unavailable for all nodes
  _AssertAllLinesEqual("gnt-node list --verbose --no-headers -o powered",
                       "(unavail)")

  # Conflicting options
  AssertCommand(["gnt-cluster", "epo", "--groups", "--all"], fail=True)
  # --all doesn't expect arguments
  AssertCommand(["gnt-cluster", "epo", "--all", "some_arg"], fail=True)

  # Unless --all is given master is not allowed to be in the list
  AssertCommand(["gnt-cluster", "epo", "-f", master.primary], fail=True)

  # This shouldn't fail
  AssertCommand(["gnt-cluster", "epo", "-f", "--all"])

  # All instances should have been stopped now; they show up as ERROR_down
  # because each instance is stopped but not recorded as such
  _AssertAllLinesEqual("gnt-instance list --no-headers -o status",
                       "ERROR_down")

  # Now start everything again
  AssertCommand(["gnt-cluster", "epo", "--on", "-f", "--all"])

  # All instances should have been started now
  _AssertAllLinesEqual("gnt-instance list --no-headers -o status", "running")
344 |
|
345 |
|
346 |
def TestClusterVerify():
  """gnt-cluster verify"""
  # Both the configuration and the disks must verify cleanly
  for check in (_CLUSTER_VERIFY, ["gnt-cluster", "verify-disks"]):
    AssertCommand(check)
350 |
|
351 |
|
352 |
# pylint: disable=W0613
|
353 |
def TestClusterVerifyDisksBrokenDRBD(instance, inst_nodes):
  """gnt-cluster verify-disks with broken DRBD"""
  # Intentionally a no-op: the real test body is commented out until
  # issue 516 is resolved (see the FIXME note); the signature is kept
  # unchanged so callers do not need to change when it is re-enabled.
  pass
|
356 |
|
357 |
# FIXME (thomasth): reenable once it works (see issue 516!)
|
358 |
# qa_daemon.TestPauseWatcher()
|
359 |
#
|
360 |
# try:
|
361 |
# info = qa_instance.GetInstanceInfo(instance.name)
|
362 |
# snode = inst_nodes[1]
|
363 |
# for idx, minor in enumerate(info["drbd-minors"][snode.primary]):
|
364 |
# if idx % 2 == 0:
|
365 |
# break_drbd_cmd = \
|
366 |
# "(drbdsetup %d down >/dev/null 2>&1;" \
|
367 |
# " drbdsetup down resource%d >/dev/null 2>&1) || /bin/true" % \
|
368 |
# (minor, minor)
|
369 |
# else:
|
370 |
# break_drbd_cmd = \
|
371 |
# "(drbdsetup %d detach >/dev/null 2>&1;" \
|
372 |
# " drbdsetup detach %d >/dev/null 2>&1) || /bin/true" % \
|
373 |
# (minor, minor)
|
374 |
# AssertCommand(break_drbd_cmd, node=snode)
|
375 |
#
|
376 |
# verify_output = GetCommandOutput(qa_config.GetMasterNode().primary,
|
377 |
# "gnt-cluster verify-disks")
|
378 |
# activation_msg = "Activating disks for instance '%s'" % instance.name
|
379 |
# if activation_msg not in verify_output:
|
380 |
# raise qa_error.Error("gnt-cluster verify-disks did not activate broken"
|
381 |
# " DRBD disks:\n%s" % verify_output)
|
382 |
#
|
383 |
# verify_output = GetCommandOutput(qa_config.GetMasterNode().primary,
|
384 |
# "gnt-cluster verify-disks")
|
385 |
# if activation_msg in verify_output:
|
386 |
# raise qa_error.Error("gnt-cluster verify-disks wants to activate broken"
|
387 |
# " DRBD disks on second attempt:\n%s" % verify_output)
|
388 |
#
|
389 |
# AssertCommand(_CLUSTER_VERIFY)
|
390 |
# finally:
|
391 |
# qa_daemon.TestResumeWatcher()
|
392 |
|
393 |
|
394 |
def TestJobqueue():
  """gnt-debug test-jobqueue"""
  # Exercises the job queue via the debugging command; must succeed
  jq_cmd = ["gnt-debug", "test-jobqueue"]
  AssertCommand(jq_cmd)
397 |
|
398 |
|
399 |
def TestDelay(node):
  """gnt-debug delay"""
  base = ["gnt-debug", "delay"]
  # Delay on the master, then without the master, then on one node only
  AssertCommand(base + ["1"])
  AssertCommand(base + ["--no-master", "1"])
  AssertCommand(base + ["--no-master", "-n", node.primary, "1"])
405 |
|
406 |
|
407 |
def TestClusterReservedLvs():
  """gnt-cluster reserved lvs"""
  vgname = qa_config.get("vg-name", constants.DEFAULT_VG)
  lvname = _QA_LV_PREFIX + "test"
  lvfullname = "/".join([vgname, lvname])

  # Each entry is (expected failure, command): a stray LV must make
  # cluster-verify fail unless it matches one of the reserved-lvs patterns
  steps = [
    (False, _CLUSTER_VERIFY),
    (False, ["gnt-cluster", "modify", "--reserved-lvs", ""]),
    (False, ["lvcreate", "-L1G", "-n", lvname, vgname]),
    (True, _CLUSTER_VERIFY),
    (False, ["gnt-cluster", "modify", "--reserved-lvs",
             "%s,.*/other-test" % lvfullname]),
    (False, _CLUSTER_VERIFY),
    (False, ["gnt-cluster", "modify", "--reserved-lvs",
             ".*/%s.*" % _QA_LV_PREFIX]),
    (False, _CLUSTER_VERIFY),
    (False, ["gnt-cluster", "modify", "--reserved-lvs", ""]),
    (True, _CLUSTER_VERIFY),
    (False, ["lvremove", "-f", lvfullname]),
    (False, _CLUSTER_VERIFY),
    ]
  for (expect_fail, args) in steps:
    AssertCommand(args, fail=expect_fail)
429 |
|
430 |
|
431 |
def TestClusterModifyEmpty():
  """gnt-cluster modify"""
  # Calling modify without any option to change must be rejected
  AssertCommand(["gnt-cluster", "modify"], fail=True)
434 |
|
435 |
|
436 |
def TestClusterModifyDisk():
  """gnt-cluster modify -D"""
  # Every entry in _FAIL_PARAMS is an invalid disk parameter
  for bad_param in _FAIL_PARAMS:
    AssertCommand(["gnt-cluster", "modify", "-D", bad_param], fail=True)
440 |
|
441 |
|
442 |
def TestClusterModifyDiskTemplates():
  """gnt-cluster modify --enabled-disk-templates=...

  Runs the instance-independent argument tests first, then creates an
  instance to check that a disk template in use cannot be disabled while
  an unused one can.

  """
  enabled_disk_templates = qa_config.GetEnabledDiskTemplates()
  default_disk_template = qa_config.GetDefaultDiskTemplate()

  # Argument handling and vg-name interaction; no instances needed
  _TestClusterModifyDiskTemplatesArguments(default_disk_template,
                                           enabled_disk_templates)
  _TestClusterModifyDiskTemplatesVgName(enabled_disk_templates)

  _RestoreEnabledDiskTemplates()
  # NOTE(review): the acquired nodes are never explicitly released in this
  # function -- confirm whether qa_config expects a matching release call
  nodes = qa_config.AcquireManyNodes(2)

  instance_template = enabled_disk_templates[0]
  instance = qa_instance.CreateInstanceByDiskTemplate(nodes, instance_template)

  _TestClusterModifyUnusedDiskTemplate(instance_template)
  _TestClusterModifyUsedDiskTemplate(instance_template,
                                     enabled_disk_templates)

  qa_instance.TestInstanceRemove(instance)
  _RestoreEnabledDiskTemplates()
463 |
|
464 |
|
465 |
def _RestoreEnabledDiskTemplates():
  """Sets the list of enabled disk templates back to the list of enabled disk
  templates from the QA configuration. This can be used to make sure that
  the tests that modify the list of disk templates do not interfere with
  other tests.

  """
  enabled = qa_config.GetEnabledDiskTemplates()
  args = ["gnt-cluster", "modify",
          "--enabled-disk-templates=%s" % ",".join(enabled)]

  # LVM-based templates require a volume group to be set as well
  if utils.IsLvmEnabled(enabled):
    args.append("--vg-name=%s" %
                qa_config.get("vg-name", constants.DEFAULT_VG))

  AssertCommand(args, fail=False)
|
480 |
|
481 |
|
482 |
def _TestClusterModifyDiskTemplatesArguments(default_disk_template,
                                             enabled_disk_templates):
  """Tests argument handling of 'gnt-cluster modify' with respect to
  the parameter '--enabled-disk-templates'. This test is independent
  of instances.

  """
  _RestoreEnabledDiskTemplates()

  modify = ["gnt-cluster", "modify"]

  # Bogus template names must be rejected
  AssertCommand(modify + ["--enabled-disk-templates=pinkbunny"],
                fail=True)

  # Duplicate entries do no harm
  duplicated = "%s,%s" % (default_disk_template, default_disk_template)
  AssertCommand(modify + ["--enabled-disk-templates=" + duplicated],
                fail=False)

  if constants.DT_DRBD8 in enabled_disk_templates:
    # Interaction with the --drbd-usermode-helper option
    helper = qa_config.get("drbd-usermode-helper", None)
    if not helper:
      helper = "/bin/true"
    # Specifying a helper while disabling drbd is ok. Note that drbd still
    # has to be installed on the nodes in this case
    AssertCommand(modify + ["--drbd-usermode-helper=%s" % helper,
                            "--enabled-disk-templates=%s" %
                            constants.DT_DISKLESS],
                  fail=False)
    # Specifying a helper when drbd is re-enabled
    AssertCommand(modify + ["--drbd-usermode-helper=%s" % helper,
                            "--enabled-disk-templates=%s" %
                            ",".join(enabled_disk_templates)],
                  fail=False)
|
520 |
|
521 |
|
522 |
def _TestClusterModifyDiskTemplatesVgName(enabled_disk_templates):
  """Tests argument handling of 'gnt-cluster modify' with respect to
  the parameter '--enabled-disk-templates' and '--vg-name'. This test is
  independent of instances.

  @type enabled_disk_templates: list of strings
  @param enabled_disk_templates: disk templates enabled in the QA config

  """
  if not utils.IsLvmEnabled(enabled_disk_templates):
    # These tests only make sense if lvm is enabled for QA
    return

  # determine an LVM and a non-LVM disk template for the tests
  non_lvm_templates = list(set(enabled_disk_templates)
                           - set(utils.GetLvmDiskTemplates()))
  lvm_template = list(set(enabled_disk_templates)
                      .intersection(set(utils.GetLvmDiskTemplates())))[0]
  non_lvm_template = None
  if non_lvm_templates:
    non_lvm_template = non_lvm_templates[0]
  else:
    # If no non-lvm disk template is available for QA, choose 'diskless' and
    # hope for the best.
    # Fixed: use the disk-template constant DT_DISKLESS here; ST_DISKLESS is
    # a storage-type constant, while "--enabled-disk-templates" takes disk
    # template names
    non_lvm_template = constants.DT_DISKLESS

  vgname = qa_config.get("vg-name", constants.DEFAULT_VG)

  # Clean start: unset volume group name, disable lvm storage
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-templates=%s" % non_lvm_template,
     "--vg-name="],
    fail=False)

  # Try to enable lvm, when no volume group is given
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-templates=%s" % lvm_template],
    fail=True)

  # Set volume group, with lvm still disabled: just a warning
  AssertCommand(["gnt-cluster", "modify", "--vg-name=%s" % vgname], fail=False)

  # Try unsetting vg name and enabling lvm at the same time
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-templates=%s" % lvm_template,
     "--vg-name="],
    fail=True)

  # Enable lvm with vg name present
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-templates=%s" % lvm_template],
    fail=False)

  # Try unsetting vg name with lvm still enabled
  AssertCommand(["gnt-cluster", "modify", "--vg-name="], fail=True)

  # Disable lvm with vg name still set
  AssertCommand(
    ["gnt-cluster", "modify", "--enabled-disk-templates=%s" % non_lvm_template],
    fail=False)

  # Try unsetting vg name with lvm disabled
  AssertCommand(["gnt-cluster", "modify", "--vg-name="], fail=False)

  # Set vg name and enable lvm at the same time
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-templates=%s" % lvm_template,
     "--vg-name=%s" % vgname],
    fail=False)

  # Unset vg name and disable lvm at the same time
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-templates=%s" % non_lvm_template,
     "--vg-name="],
    fail=False)

  _RestoreEnabledDiskTemplates()
602 |
|
603 |
|
604 |
def _TestClusterModifyUsedDiskTemplate(instance_template,
                                       enabled_disk_templates):
  """Tests that disk templates that are currently in use by instances cannot
  be disabled on the cluster.

  """
  # The list of enabled disk templates may never be empty: if the instance's
  # template is the only enabled one, propose other templates instead of an
  # empty list
  remaining = set(enabled_disk_templates) - set([instance_template])
  if not remaining:
    remaining = set([constants.DT_DISKLESS, constants.DT_BLOCK]) \
        - set([instance_template])
  # Dropping the in-use template must be rejected
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-templates=%s" % ",".join(remaining)],
    fail=True)
|
623 |
|
624 |
|
625 |
def _TestClusterModifyUnusedDiskTemplate(instance_template):
  """Tests that unused disk templates can be disabled safely."""
  all_disk_templates = constants.DISK_TEMPLATES
  if not utils.IsLvmEnabled(qa_config.GetEnabledDiskTemplates()):
    # Without LVM in the QA config, LVM-based templates cannot be enabled
    all_disk_templates = list(set(all_disk_templates) -
                              set(utils.GetLvmDiskTemplates()))

  # First enable as many templates as possible...
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-templates=%s" % ",".join(all_disk_templates)],
    fail=False)
  # ...then shrink the set to only the template actually in use
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-templates=%s" % instance_template],
    fail=False)
|
643 |
|
644 |
|
645 |
def TestClusterModifyBe():
  """gnt-cluster modify -B"""
  # (expected failure, command) pairs; after each modification the
  # effective value is checked by grepping "gnt-cluster info"
  checks = [
    # max/min mem
    (False, ["gnt-cluster", "modify", "-B", "maxmem=256"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *maxmem: 256$'"]),
    (False, ["gnt-cluster", "modify", "-B", "minmem=256"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *minmem: 256$'"]),
    (True, ["gnt-cluster", "modify", "-B", "maxmem=a"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *maxmem: 256$'"]),
    (True, ["gnt-cluster", "modify", "-B", "minmem=a"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *minmem: 256$'"]),
    (False, ["gnt-cluster", "modify", "-B", "maxmem=128,minmem=128"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *maxmem: 128$'"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *minmem: 128$'"]),
    # vcpus
    (False, ["gnt-cluster", "modify", "-B", "vcpus=4"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *vcpus: 4$'"]),
    (True, ["gnt-cluster", "modify", "-B", "vcpus=a"]),
    (False, ["gnt-cluster", "modify", "-B", "vcpus=1"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *vcpus: 1$'"]),
    # auto_balance
    (False, ["gnt-cluster", "modify", "-B", "auto_balance=False"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *auto_balance: False$'"]),
    (True, ["gnt-cluster", "modify", "-B", "auto_balance=1"]),
    (False, ["gnt-cluster", "modify", "-B", "auto_balance=True"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *auto_balance: True$'"]),
    ]
  for (expect_fail, args) in checks:
    AssertCommand(args, fail=expect_fail)

  # redo the original-requested BE parameters, if any
  bep = qa_config.get("backend-parameters", "")
  if bep:
    AssertCommand(["gnt-cluster", "modify", "-B", bep])
679 |
|
680 |
|
681 |
def _GetClusterIPolicy():
  """Return the run-time values of the cluster-level instance policy.

  @rtype: tuple
  @return: (policy, specs), where:
      - policy is a dictionary of the policy values, instance specs excluded
      - specs is a dictionary containing only the specs, using the internal
        format (see L{constants.IPOLICY_DEFAULTS} for an example)

  """
  info = qa_utils.GetObjectInfo(["gnt-cluster", "info"])
  (ret_policy, ret_specs) = qa_utils.ParseIPolicy(
    info["Instance policy - limits for instances"])

  # Sanity checks
  assert "minmax" in ret_specs and "std" in ret_specs
  assert len(ret_specs["minmax"]) > 0
  assert len(ret_policy) > 0
  return (ret_policy, ret_specs)
|
700 |
|
701 |
|
702 |
def TestClusterModifyIPolicy():
  """gnt-cluster modify --ipolicy-*

  For each tested parameter, applies a series of good and bad values and
  after every attempt re-reads the effective policy, checking that the
  tested parameter has the expected value and that nothing else changed.

  """
  basecmd = ["gnt-cluster", "modify"]
  (old_policy, old_specs) = _GetClusterIPolicy()
  for par in ["vcpu-ratio", "spindle-ratio"]:
    curr_val = float(old_policy[par])
    # (change must succeed, value to set) pairs; the last entry restores
    # the original value so later tests see an unchanged policy
    test_values = [
      (True, 1.0),
      (True, 1.5),
      (True, 2),
      (False, "a"),
      # Restore the old value
      (True, curr_val),
      ]
    for (good, val) in test_values:
      cmd = basecmd + ["--ipolicy-%s=%s" % (par, val)]
      AssertCommand(cmd, fail=not good)
      if good:
        curr_val = val
      # Check the affected parameter
      (eff_policy, eff_specs) = _GetClusterIPolicy()
      AssertEqual(float(eff_policy[par]), curr_val)
      # Check everything else
      AssertEqual(eff_specs, old_specs)
      for p in eff_policy.keys():
        if p == par:
          continue
        AssertEqual(eff_policy[p], old_policy[p])

  # Disk templates are treated slightly differently: the displayed value
  # is a comma+space separated string, so spaces are stripped on compare
  par = "disk-templates"
  disp_str = "allowed disk templates"
  curr_val = old_policy[disp_str]
  test_values = [
    (True, constants.DT_PLAIN),
    (True, "%s,%s" % (constants.DT_PLAIN, constants.DT_DRBD8)),
    (False, "thisisnotadisktemplate"),
    (False, ""),
    # Restore the old value
    (True, curr_val.replace(" ", "")),
    ]
  for (good, val) in test_values:
    cmd = basecmd + ["--ipolicy-%s=%s" % (par, val)]
    AssertCommand(cmd, fail=not good)
    if good:
      curr_val = val
    # Check the affected parameter
    (eff_policy, eff_specs) = _GetClusterIPolicy()
    AssertEqual(eff_policy[disp_str].replace(" ", ""), curr_val)
    # Check everything else
    AssertEqual(eff_specs, old_specs)
    for p in eff_policy.keys():
      if p == disp_str:
        continue
      AssertEqual(eff_policy[p], old_policy[p])
757 |
|
758 |
|
759 |
def TestClusterSetISpecs(new_specs=None, diff_specs=None, fail=False,
                         old_values=None):
  """Change instance specs.

  At most one of new_specs or diff_specs can be specified.

  @type new_specs: dict
  @param new_specs: new complete specs, in the same format returned by
      L{_GetClusterIPolicy}
  @type diff_specs: dict
  @param diff_specs: partial specs, it can be an incomplete specifications, but
      if min/max specs are specified, their number must match the number of the
      existing specs
  @type fail: bool
  @param fail: if the change is expected to fail
  @type old_values: tuple
  @param old_values: (old_policy, old_specs), as returned by
      L{_GetClusterIPolicy}
  @return: same as L{_GetClusterIPolicy}

  """
  def _BuildModifyCmd(opts):
    # Prepend the "gnt-cluster modify" invocation to the generated options
    return ["gnt-cluster", "modify"] + opts

  return qa_utils.TestSetISpecs(
    new_specs=new_specs, diff_specs=diff_specs,
    get_policy_fn=_GetClusterIPolicy, build_cmd_fn=_BuildModifyCmd,
    fail=fail, old_values=old_values)
785 |
|
786 |
|
787 |
def TestClusterModifyISpecs():
  """gnt-cluster modify --specs-*

  Exercises the min/std/max instance-spec bounds for every parameter and
  then checks that the policy emitted by "gnt-cluster show-ispecs-cmd" can
  be re-applied as a no-op.

  """
  params = ["memory-size", "disk-size", "disk-count", "cpu-count", "nic-count"]
  (cur_policy, cur_specs) = _GetClusterIPolicy()
  # This test assumes that there is only one min/max bound
  assert len(cur_specs[constants.ISPECS_MINMAX]) == 1
  for par in params:
    # Each entry is (change must succeed, min, std, max); valid entries
    # must satisfy min <= std <= max
    test_values = [
      (True, 0, 4, 12),
      (True, 4, 4, 12),
      (True, 4, 12, 12),
      (True, 4, 4, 4),
      (False, 4, 0, 12),
      (False, 4, 16, 12),
      (False, 4, 4, 0),
      (False, 12, 4, 4),
      (False, 12, 4, 0),
      (False, "a", 4, 12),
      (False, 0, "a", 12),
      (False, 0, 4, "a"),
      # This is to restore the old values
      (True,
       cur_specs[constants.ISPECS_MINMAX][0][constants.ISPECS_MIN][par],
       cur_specs[constants.ISPECS_STD][par],
       cur_specs[constants.ISPECS_MINMAX][0][constants.ISPECS_MAX][par])
      ]
    for (good, mn, st, mx) in test_values:
      new_vals = {
        constants.ISPECS_MINMAX: [{
          constants.ISPECS_MIN: {par: mn},
          constants.ISPECS_MAX: {par: mx}
          }],
        constants.ISPECS_STD: {par: st}
        }
      cur_state = (cur_policy, cur_specs)
      # We update cur_specs, as we've copied the values to restore already
      (cur_policy, cur_specs) = TestClusterSetISpecs(
        diff_specs=new_vals, fail=not good, old_values=cur_state)

  # Get the ipolicy command
  mnode = qa_config.GetMasterNode()
  initcmd = GetCommandOutput(mnode.primary, "gnt-cluster show-ispecs-cmd")
  modcmd = ["gnt-cluster", "modify"]
  opts = initcmd.split()
  assert opts[0:2] == ["gnt-cluster", "init"]
  # Copy each "--ipolicy-*" option together with its value into modcmd
  for k in range(2, len(opts) - 1):
    if opts[k].startswith("--ipolicy-"):
      assert k + 2 <= len(opts)
      modcmd.extend(opts[k:k + 2])
  # Re-apply the ipolicy (this should be a no-op)
  AssertCommand(modcmd)
  new_initcmd = GetCommandOutput(mnode.primary, "gnt-cluster show-ispecs-cmd")
  AssertEqual(initcmd, new_initcmd)
|
841 |
|
842 |
def TestClusterInfo():
  """gnt-cluster info"""
  # Only checks that the command completes successfully
  AssertCommand(["gnt-cluster", "info"])
845 |
|
846 |
|
847 |
def TestClusterRedistConf():
  """gnt-cluster redist-conf"""
  # Smoke test: redistributing the configuration must succeed
  redist_cmd = ["gnt-cluster", "redist-conf"]
  AssertCommand(redist_cmd)
850 |
|
851 |
|
852 |
def TestClusterGetmaster():
  """gnt-cluster getmaster"""
  # Smoke test: querying the master node name must succeed
  getmaster_cmd = ["gnt-cluster", "getmaster"]
  AssertCommand(getmaster_cmd)
855 |
|
856 |
|
857 |
def TestClusterVersion():
  """gnt-cluster version"""
  # Smoke test: the version query must exit successfully
  version_cmd = ["gnt-cluster", "version"]
  AssertCommand(version_cmd)
860 |
|
861 |
|
862 |
def TestClusterRenewCrypto():
  """gnt-cluster renew-crypto

  Exercises error handling (conflicting options, invalid certificate),
  installing custom RAPI certificate and cluster domain secret files,
  renewing all crypto material at once, and finally restoring the
  original RAPI certificate from a backup.

  """
  master = qa_config.GetMasterNode()

  # Conflicting options: "renew X" and "use this file for X" must not be
  # accepted together, so each combination is expected to fail
  cmd = ["gnt-cluster", "renew-crypto", "--force",
         "--new-cluster-certificate", "--new-confd-hmac-key"]
  conflicting = [
    ["--new-rapi-certificate", "--rapi-certificate=/dev/null"],
    ["--new-cluster-domain-secret", "--cluster-domain-secret=/dev/null"],
    ]
  for i in conflicting:
    AssertCommand(cmd + i, fail=True)

  # Invalid RAPI certificate (/dev/null is empty, hence not a valid cert)
  cmd = ["gnt-cluster", "renew-crypto", "--force",
         "--rapi-certificate=/dev/null"]
  AssertCommand(cmd, fail=True)

  # Keep a copy of the current RAPI certificate so it can be restored below
  rapi_cert_backup = qa_utils.BackupFile(master.primary,
                                         pathutils.RAPI_CERT_FILE)
  try:
    # Custom RAPI certificate, generated locally and uploaded to the master
    fh = tempfile.NamedTemporaryFile()

    # Ensure certificate doesn't cause "gnt-cluster verify" to complain
    validity = constants.SSL_CERT_EXPIRATION_WARN * 3

    utils.GenerateSelfSignedSslCert(fh.name, validity=validity)

    tmpcert = qa_utils.UploadFile(master.primary, fh.name)
    try:
      AssertCommand(["gnt-cluster", "renew-crypto", "--force",
                     "--rapi-certificate=%s" % tmpcert])
    finally:
      # Remove the uploaded certificate from the master node
      AssertCommand(["rm", "-f", tmpcert])

    # Custom cluster domain secret (a random secret plus trailing newline)
    cds_fh = tempfile.NamedTemporaryFile()
    cds_fh.write(utils.GenerateSecret())
    cds_fh.write("\n")
    cds_fh.flush()

    tmpcds = qa_utils.UploadFile(master.primary, cds_fh.name)
    try:
      AssertCommand(["gnt-cluster", "renew-crypto", "--force",
                     "--cluster-domain-secret=%s" % tmpcds])
    finally:
      # Remove the uploaded secret file from the master node
      AssertCommand(["rm", "-f", tmpcds])

    # Normal case: renew every kind of crypto material in one call
    AssertCommand(["gnt-cluster", "renew-crypto", "--force",
                   "--new-cluster-certificate", "--new-confd-hmac-key",
                   "--new-rapi-certificate", "--new-cluster-domain-secret"])

    # Restore RAPI certificate from the backup taken above
    AssertCommand(["gnt-cluster", "renew-crypto", "--force",
                   "--rapi-certificate=%s" % rapi_cert_backup])
  finally:
    # The backup file is no longer needed once restored (or on failure)
    AssertCommand(["rm", "-f", rapi_cert_backup])
922 |
|
923 |
|
924 |
def TestClusterBurnin():
  """Burnin

  Runs the external burnin tool against as many QA instances as the
  configuration allows, with behaviour (disk template, parallelism,
  reboot types, etc.) driven by the QA "options" section.

  """
  master = qa_config.GetMasterNode()

  # Burnin behaviour is configurable via the QA options dictionary
  options = qa_config.get("options", {})
  disk_template = options.get("burnin-disk-template", constants.DT_DRBD8)
  parallel = options.get("burnin-in-parallel", False)
  check_inst = options.get("burnin-check-instances", False)
  do_rename = options.get("burnin-rename", "")
  do_reboot = options.get("burnin-reboot", True)
  reboot_types = options.get("reboot-types", constants.REBOOT_TYPES)

  # Get as many instances as we need; running out early is tolerated as
  # long as at least one instance could be acquired
  instances = []
  try:
    try:
      num = qa_config.get("options", {}).get("burnin-instances", 1)
      for _ in range(0, num):
        instances.append(qa_config.AcquireInstance())
    except qa_error.OutOfInstancesError:
      print "Not enough instances, continuing anyway."

    if len(instances) < 1:
      raise qa_error.Error("Burnin needs at least one instance")

    # The burnin script is uploaded to the master node and executed there
    script = qa_utils.UploadFile(master.primary, "../tools/burnin")
    try:
      disks = qa_config.GetDiskOptions()
      # Run burnin
      cmd = [script,
             "--os=%s" % qa_config.get("os"),
             "--minmem-size=%s" % qa_config.get(constants.BE_MINMEM),
             "--maxmem-size=%s" % qa_config.get(constants.BE_MAXMEM),
             "--disk-size=%s" % ",".join([d.get("size") for d in disks]),
             "--disk-growth=%s" % ",".join([d.get("growth") for d in disks]),
             "--disk-template=%s" % disk_template]
      if parallel:
        # --early-release is only used together with parallel burnin
        cmd.append("--parallel")
        cmd.append("--early-release")
      if check_inst:
        cmd.append("--http-check")
      if do_rename:
        cmd.append("--rename=%s" % do_rename)
      if not do_reboot:
        cmd.append("--no-reboot")
      else:
        cmd.append("--reboot-types=%s" % ",".join(reboot_types))
      # Instance names are passed as positional arguments
      cmd += [inst.name for inst in instances]
      AssertCommand(cmd)
    finally:
      # Remove the uploaded script from the master node
      AssertCommand(["rm", "-f", script])

  finally:
    # Always return the acquired instances to the QA pool
    for inst in instances:
      inst.Release()
979 |
|
980 |
|
981 |
def TestClusterMasterFailover():
  """gnt-cluster master-failover"""
  current_master = qa_config.GetMasterNode()
  # Any node other than the current master can take over
  new_master = qa_config.AcquireNode(exclude=current_master)

  failover_cmd = ["gnt-cluster", "master-failover"]
  try:
    # Fail over to the acquired node, then fail back so the cluster is
    # left with its original master
    AssertCommand(failover_cmd, node=new_master)
    AssertCommand(failover_cmd, node=current_master)
  finally:
    new_master.Release()
993 |
|
994 |
|
995 |
def _NodeQueueDrainFile(node):
  """Computes the path of the job queue drain file on a node.

  """
  drain_file = pathutils.JOB_QUEUE_DRAIN_FILE
  return qa_utils.MakeNodePath(node, drain_file)
|
1000 |
|
1001 |
|
1002 |
def _AssertDrainFile(node, **kwargs):
  """Asserts presence of the queue drain file (pass fail=True for absence).

  """
  drain_path = _NodeQueueDrainFile(node)
  AssertCommand(["test", "-f", drain_path], node=node, **kwargs)
1007 |
|
1008 |
|
1009 |
def TestClusterMasterFailoverWithDrainedQueue():
  """gnt-cluster master-failover with drained queue"""
  current_master = qa_config.GetMasterNode()
  new_master = qa_config.AcquireNode(exclude=current_master)
  both_nodes = [current_master, new_master]

  # Precondition: neither node has a drained queue
  for checked_node in both_nodes:
    _AssertDrainFile(checked_node, fail=True)

  # Drain the queue on the node that will become master
  AssertCommand(["touch", _NodeQueueDrainFile(new_master)],
                node=new_master)

  failover_cmd = ["gnt-cluster", "master-failover"]
  try:
    _AssertDrainFile(new_master)
    AssertCommand(failover_cmd, node=new_master)
    # The failover must clear the drain flag everywhere
    _AssertDrainFile(current_master, fail=True)
    _AssertDrainFile(new_master, fail=True)

    # Fail back to the original master node
    AssertCommand(failover_cmd, node=current_master)
  finally:
    new_master.Release()

  # Postcondition: the queue stays undrained on both nodes
  for checked_node in both_nodes:
    _AssertDrainFile(checked_node, fail=True)
|
1037 |
|
1038 |
|
1039 |
def TestClusterCopyfile():
  """gnt-cluster copyfile"""
  master = qa_config.GetMasterNode()

  # Unique payload so the copy can be verified on every node
  payload = utils.NewUUID()

  # Write the payload to a local temporary file
  tmpfile = tempfile.NamedTemporaryFile()
  tmpfile.write(payload)
  tmpfile.flush()
  tmpfile.seek(0)

  # Push the file to the master node first
  remote_path = qa_utils.UploadFile(master.primary, tmpfile.name)
  try:
    # Distribute it cluster-wide and check it arrived intact everywhere
    AssertCommand(["gnt-cluster", "copyfile", remote_path])
    _CheckFileOnAllNodes(remote_path, payload)
  finally:
    _RemoveFileFromAllNodes(remote_path)
1059 |
|
1060 |
|
1061 |
def TestClusterCommand():
  """gnt-cluster command"""
  # Unique marker echoed into a per-run file on every node
  marker = utils.NewUUID()
  target_file = "/tmp/gnt%s" % utils.NewUUID()
  echo_cmd = utils.ShellQuoteArgs(["echo", "-n", marker])
  full_cmd = utils.ShellQuoteArgs(["gnt-cluster", "command",
                                   "%s >%s" % (echo_cmd, target_file)])

  try:
    # Run the command cluster-wide, then verify the file on all nodes
    AssertCommand(full_cmd)
    _CheckFileOnAllNodes(target_file, marker)
  finally:
    _RemoveFileFromAllNodes(target_file)
1074 |
|
1075 |
|
1076 |
def TestClusterDestroy():
  """gnt-cluster destroy"""
  # The safety flag is passed deliberately: this really destroys the cluster
  destroy_cmd = ["gnt-cluster", "destroy", "--yes-do-it"]
  AssertCommand(destroy_cmd)
1079 |
|
1080 |
|
1081 |
def TestClusterRepairDiskSizes():
  """gnt-cluster repair-disk-sizes"""
  # Smoke test: the repair command must exit successfully
  repair_cmd = ["gnt-cluster", "repair-disk-sizes"]
  AssertCommand(repair_cmd)
1084 |
|
1085 |
|
1086 |
def TestSetExclStorCluster(newvalue):
  """Set the exclusive_storage node parameter at the cluster level.

  @type newvalue: bool
  @param newvalue: New value of exclusive_storage
  @rtype: bool
  @return: The old value of exclusive_storage

  """
  field_path = ["Default node parameters", "exclusive_storage"]
  previous = _GetClusterField(field_path)

  AssertCommand(["gnt-cluster", "modify", "--node-parameters",
                 "exclusive_storage=%s" % newvalue])

  # Read the value back and make sure the modification took effect
  actual = _GetClusterField(field_path)
  if actual != newvalue:
    raise qa_error.Error("exclusive_storage has the wrong value: %s instead"
                         " of %s" % (actual, newvalue))

  # Keep the QA configuration in sync with the cluster state
  qa_config.SetExclusiveStorage(newvalue)
  return previous
|
1105 |
|
1106 |
|
1107 |
def TestExclStorSharedPv(node):
  """cluster-verify reports LVs that share the same PV with exclusive_storage.

  """
  vgname = qa_config.get("vg-name", constants.DEFAULT_VG)
  first_lv = _QA_LV_PREFIX + "vol1"
  second_lv = _QA_LV_PREFIX + "vol2"
  node_name = node.primary

  # One orphan LV: only an orphan-LV error is expected
  AssertCommand(["lvcreate", "-L1G", "-n", first_lv, vgname], node=node_name)
  AssertClusterVerify(fail=True, errors=[constants.CV_ENODEORPHANLV])

  # A second LV on the same VG additionally triggers the LVM error
  AssertCommand(["lvcreate", "-L1G", "-n", second_lv, vgname], node=node_name)
  AssertClusterVerify(fail=True, errors=[constants.CV_ENODELVM,
                                         constants.CV_ENODEORPHANLV])

  # Cleaning up both volumes must make cluster-verify pass again
  for lv in (first_lv, second_lv):
    AssertCommand(["lvremove", "-f", "/".join([vgname, lv])], node=node_name)
  AssertClusterVerify()