root / qa / ganeti-qa.py @ 6c005975
History | View | Annotate | Download (32.7 kB)
1 |
#!/usr/bin/python -u
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
|
22 |
"""Script for doing QA on Ganeti.
|
23 |
|
24 |
"""
|
25 |
|
26 |
# pylint: disable=C0103
|
27 |
# due to invalid name
|
28 |
|
29 |
import copy |
30 |
import datetime |
31 |
import optparse |
32 |
import sys |
33 |
|
34 |
import colors |
35 |
import qa_cluster |
36 |
import qa_config |
37 |
import qa_daemon |
38 |
import qa_env |
39 |
import qa_error |
40 |
import qa_group |
41 |
import qa_instance |
42 |
import qa_iptables |
43 |
import qa_monitoring |
44 |
import qa_network |
45 |
import qa_node |
46 |
import qa_os |
47 |
import qa_job |
48 |
import qa_rapi |
49 |
import qa_tags |
50 |
import qa_utils |
51 |
|
52 |
from ganeti import utils |
53 |
from ganeti import rapi # pylint: disable=W0611 |
54 |
from ganeti import constants |
55 |
from ganeti import pathutils |
56 |
|
57 |
from ganeti.http.auth import ParsePasswordFile |
58 |
import ganeti.rapi.client # pylint: disable=W0611 |
59 |
from ganeti.rapi.client import UsesRapiClient |
60 |
|
61 |
|
62 |
def _FormatHeader(line, end=72, mark="-", color=None):
  """Fill a line up to the end column.

  Produces a banner of the form "MMMM text ----" padded with dashes up
  to column C{end} (trailing padding is trimmed), then colorized.

  @param line: text to embed in the banner
  @param end: target column up to which the line is padded
  @param mark: character repeated four times as the left-hand marker
  @param color: color specification forwarded to L{colors.colorize}
  @return: the formatted (and possibly colorized) banner string

  """
  banner = "%s %s " % (mark * 4, line)
  # A negative padding width simply yields an empty string
  banner += "-" * (end - len(banner))
  return colors.colorize(banner.rstrip(), color=color)
|
71 |
|
72 |
|
73 |
def _DescriptionOf(fn): |
74 |
"""Computes the description of an item.
|
75 |
|
76 |
"""
|
77 |
if fn.__doc__:
|
78 |
desc = fn.__doc__.splitlines()[0].strip()
|
79 |
desc = desc.rstrip(".")
|
80 |
if fn.__name__:
|
81 |
desc = "[" + fn.__name__ + "] " + desc |
82 |
else:
|
83 |
desc = "%r" % fn
|
84 |
|
85 |
return desc
|
86 |
|
87 |
|
88 |
def RunTest(fn, *args, **kwargs):
  """Runs a test after printing a header.

  Prints a start banner, invokes C{fn}, then prints a PASSED or FAILED
  banner; a timing banner is printed in all cases (via C{finally}).

  @param fn: test function to run
  @param args: positional arguments passed through to C{fn}
  @param kwargs: keyword arguments passed through to C{fn}
  @return: whatever C{fn} returns
  @raise Exception: re-raises anything C{fn} raises, after printing the
      FAILED banner

  """

  tstart = datetime.datetime.now()

  desc = _DescriptionOf(fn)

  print
  print _FormatHeader("%s start %s" % (tstart, desc),
                      color=colors.YELLOW, mark="<")

  try:
    retval = fn(*args, **kwargs)
    print _FormatHeader("PASSED %s" % (desc, ), color=colors.GREEN)
    return retval
  except Exception, e:
    print _FormatHeader("FAILED %s: %s" % (desc, e), color=colors.RED)
    raise
  finally:
    # The timing banner is printed whether the test passed or failed
    tstop = datetime.datetime.now()
    tdelta = tstop - tstart
    print _FormatHeader("%s time=%s %s" % (tstop, tdelta, desc),
                        color=colors.MAGENTA, mark=">")
|
113 |
|
114 |
|
115 |
def RunTestIf(testnames, fn, *args, **kwargs):
  """Runs a test conditionally.

  If the named test(s) are enabled in the QA configuration the test is
  run via L{RunTest}; otherwise a "skipping" banner is printed instead.

  @param testnames: either a single test name in the configuration
      file, or a list of testnames (which will be AND-ed together)
  @param fn: test function to run when enabled
  @param args: positional arguments passed through to C{fn}
  @param kwargs: keyword arguments passed through to C{fn}

  """
  if qa_config.TestEnabled(testnames):
    RunTest(fn, *args, **kwargs)
  else:
    tstart = datetime.datetime.now()
    desc = _DescriptionOf(fn)
    # TODO: Formatting test names when non-string names are involved
    print _FormatHeader("%s skipping %s, test(s) %s disabled" %
                        (tstart, desc, testnames),
                        color=colors.BLUE, mark="*")
|
131 |
|
132 |
|
133 |
def RunTestBlock(fn, *args, **kwargs):
  """Runs a block of tests after printing a header.

  Like L{RunTest}, but uses BLOCK banners with bold colors and prints
  no PASSED banner on success; the timing banner is always printed.

  @param fn: callable grouping several tests
  @param args: positional arguments passed through to C{fn}
  @param kwargs: keyword arguments passed through to C{fn}
  @return: whatever C{fn} returns
  @raise Exception: re-raises anything C{fn} raises, after printing the
      BLOCK FAILED banner

  """
  tstart = datetime.datetime.now()

  desc = _DescriptionOf(fn)

  print
  print _FormatHeader("BLOCK %s start %s" % (tstart, desc),
                      color=[colors.YELLOW, colors.BOLD], mark="v")

  try:
    return fn(*args, **kwargs)
  except Exception, e:
    print _FormatHeader("BLOCK FAILED %s: %s" % (desc, e),
                        color=[colors.RED, colors.BOLD])
    raise
  finally:
    # Timing banner printed on both success and failure
    tstop = datetime.datetime.now()
    tdelta = tstop - tstart
    print _FormatHeader("BLOCK %s time=%s %s" % (tstop, tdelta, desc),
                        color=[colors.MAGENTA, colors.BOLD], mark="^")
|
156 |
|
157 |
|
158 |
def RunEnvTests():
  """Run several environment tests.

  All checks are gated on the "env" test flag.

  """
  env_checks = (
    qa_env.TestSshConnection,
    qa_env.TestIcmpPing,
    qa_env.TestGanetiCommands,
    )
  for check in env_checks:
    RunTestIf("env", check)
|
165 |
|
166 |
|
167 |
def _LookupRapiSecret(rapi_user):
  """Find the RAPI secret for the given user.

  Reads the RAPI users file from the master node and extracts the
  cleartext password for the given user.

  @param rapi_user: Login user
  @return: Login secret for the user
  @raise qa_error.Error: if the user is not present in the file, or the
      stored password uses a non-cleartext schema

  """
  CTEXT = "{CLEARTEXT}"
  master = qa_config.GetMasterNode()
  cat_cmd = ["cat", qa_utils.MakeNodePath(master, pathutils.RAPI_USERS_FILE)]
  contents = qa_utils.GetCommandOutput(master.primary,
                                       utils.ShellQuoteArgs(cat_cmd))
  entry = ParsePasswordFile(contents).get(rapi_user)
  if not entry:
    raise qa_error.Error("User %s not found in RAPI users file" % rapi_user)
  secret = entry.password
  if secret.upper().startswith(CTEXT):
    # Strip the "{CLEARTEXT}" schema prefix
    return secret[len(CTEXT):]
  if secret.startswith("{"):
    # Any other "{...}" schema (e.g. hashed) cannot be used for login
    raise qa_error.Error("Unsupported password schema for RAPI user %s:"
                         " not a clear text password" % rapi_user)
  return secret
|
190 |
|
191 |
|
192 |
def SetupCluster(rapi_user):
  """Initializes the cluster.

  Either creates a fresh cluster (when "create-cluster" is enabled) or
  adopts an existing one, then runs the empty-cluster sanity tests and
  re-enables the watcher.

  @param rapi_user: Login user for RAPI
  @return: Login secret for RAPI

  """
  rapi_secret = utils.GenerateSecret()
  RunTestIf("create-cluster", qa_cluster.TestClusterInit,
            rapi_user, rapi_secret)
  if not qa_config.TestEnabled("create-cluster"):
    # If the cluster is already in place, we assume that exclusive-storage is
    # already set according to the configuration
    qa_config.SetExclusiveStorage(qa_config.get("exclusive-storage", False))
    if qa_rapi.Enabled():
      # To support RAPI on an existing cluster we have to find out the secret
      rapi_secret = _LookupRapiSecret(rapi_user)

  qa_group.ConfigureGroups()

  # Test on empty cluster
  RunTestIf("node-list", qa_node.TestNodeList)
  RunTestIf("instance-list", qa_instance.TestInstanceList)
  RunTestIf("job-list", qa_job.TestJobList)

  RunTestIf("create-cluster", qa_node.TestNodeAddAll)
  if not qa_config.TestEnabled("create-cluster"):
    # consider the nodes are already there
    qa_node.MarkNodeAddedAll()

  RunTestIf("test-jobqueue", qa_cluster.TestJobqueue)

  # enable the watcher (unconditionally)
  RunTest(qa_daemon.TestResumeWatcher)

  RunTestIf("node-list", qa_node.TestNodeList)

  # Test listing fields
  RunTestIf("node-list", qa_node.TestNodeListFields)
  RunTestIf("instance-list", qa_instance.TestInstanceListFields)
  RunTestIf("job-list", qa_job.TestJobListFields)
  RunTestIf("instance-export", qa_instance.TestBackupListFields)

  RunTestIf("node-info", qa_node.TestNodeInfo)

  return rapi_secret
|
238 |
|
239 |
|
240 |
def RunClusterTests():
  """Runs tests related to gnt-cluster.

  Each entry pairs a test name (or an enabling predicate, e.g.
  C{qa_rapi.Enabled}) with the test function to run; order matters.

  """
  for test, fn in [
    ("create-cluster", qa_cluster.TestClusterInitDisk),
    ("cluster-renew-crypto", qa_cluster.TestClusterRenewCrypto),
    ("cluster-verify", qa_cluster.TestClusterVerify),
    ("cluster-reserved-lvs", qa_cluster.TestClusterReservedLvs),
    # TODO: add more cluster modify tests
    ("cluster-modify", qa_cluster.TestClusterModifyEmpty),
    ("cluster-modify", qa_cluster.TestClusterModifyIPolicy),
    ("cluster-modify", qa_cluster.TestClusterModifyISpecs),
    ("cluster-modify", qa_cluster.TestClusterModifyBe),
    ("cluster-modify", qa_cluster.TestClusterModifyDisk),
    ("cluster-modify", qa_cluster.TestClusterModifyDiskTemplates),
    ("cluster-modify", qa_cluster.TestClusterModifyFileStorageDir),
    ("cluster-modify", qa_cluster.TestClusterModifySharedFileStorageDir),
    ("cluster-rename", qa_cluster.TestClusterRename),
    ("cluster-info", qa_cluster.TestClusterVersion),
    ("cluster-info", qa_cluster.TestClusterInfo),
    ("cluster-info", qa_cluster.TestClusterGetmaster),
    ("cluster-redist-conf", qa_cluster.TestClusterRedistConf),
    (["cluster-copyfile", qa_config.NoVirtualCluster],
     qa_cluster.TestClusterCopyfile),
    ("cluster-command", qa_cluster.TestClusterCommand),
    ("cluster-burnin", qa_cluster.TestClusterBurnin),
    ("cluster-master-failover", qa_cluster.TestClusterMasterFailover),
    ("cluster-master-failover",
     qa_cluster.TestClusterMasterFailoverWithDrainedQueue),
    (["cluster-oob", qa_config.NoVirtualCluster],
     qa_cluster.TestClusterOob),
    (qa_rapi.Enabled, qa_rapi.TestVersion),
    (qa_rapi.Enabled, qa_rapi.TestEmptyCluster),
    (qa_rapi.Enabled, qa_rapi.TestRapiQuery),
    ]:
    RunTestIf(test, fn)
277 |
|
278 |
|
279 |
def RunRepairDiskSizes():
  """Run the repair disk-sizes test.

  """
  testname = "cluster-repair-disk-sizes"
  RunTestIf(testname, qa_cluster.TestClusterRepairDiskSizes)
|
284 |
|
285 |
|
286 |
def RunOsTests():
  """Runs all tests related to gnt-os.

  """
  os_enabled = ["os", qa_config.NoVirtualCluster]

  if qa_config.TestEnabled(qa_rapi.Enabled):
    rapi_getos = qa_rapi.GetOperatingSystems
  else:
    rapi_getos = None

  # (test function, extra arguments), in the exact order they must run;
  # the middle three tests also receive the RAPI OS-listing callback
  checks = [
    (qa_os.TestOsList, ()),
    (qa_os.TestOsDiagnose, ()),
    (qa_os.TestOsValid, (rapi_getos, )),
    (qa_os.TestOsInvalid, (rapi_getos, )),
    (qa_os.TestOsPartiallyValid, (rapi_getos, )),
    (qa_os.TestOsModifyValid, ()),
    (qa_os.TestOsModifyInvalid, ()),
    (qa_os.TestOsStatesNonExisting, ()),
    ]
  for fn, extra_args in checks:
    RunTestIf(os_enabled, fn, *extra_args)
316 |
|
317 |
|
318 |
def RunCommonInstanceTests(instance, inst_nodes):
  """Runs a few tests that are common to all disk types.

  @param instance: instance object to exercise
  @param inst_nodes: list of nodes the instance currently uses

  """
  RunTestIf("instance-shutdown", qa_instance.TestInstanceShutdown, instance)
  RunTestIf(["instance-shutdown", "instance-console", qa_rapi.Enabled],
            qa_rapi.TestRapiStoppedInstanceConsole, instance)
  RunTestIf(["instance-shutdown", "instance-modify"],
            qa_instance.TestInstanceStoppedModify, instance)
  RunTestIf("instance-shutdown", qa_instance.TestInstanceStartup, instance)

  # Test shutdown/start via RAPI
  RunTestIf(["instance-shutdown", qa_rapi.Enabled],
            qa_rapi.TestRapiInstanceShutdown, instance)
  RunTestIf(["instance-shutdown", qa_rapi.Enabled],
            qa_rapi.TestRapiInstanceStartup, instance)

  RunTestIf("instance-list", qa_instance.TestInstanceList)

  RunTestIf("instance-info", qa_instance.TestInstanceInfo, instance)

  RunTestIf("instance-modify", qa_instance.TestInstanceModify, instance)
  RunTestIf(["instance-modify", qa_rapi.Enabled],
            qa_rapi.TestRapiInstanceModify, instance)

  RunTestIf("instance-console", qa_instance.TestInstanceConsole, instance)
  RunTestIf(["instance-console", qa_rapi.Enabled],
            qa_rapi.TestRapiInstanceConsole, instance)

  RunTestIf("instance-device-names", qa_instance.TestInstanceDeviceNames,
            instance)
  # Tests below require the instance to be shut down first
  DOWN_TESTS = qa_config.Either([
    "instance-reinstall",
    "instance-rename",
    "instance-grow-disk",
    ])

  # shutdown instance for any 'down' tests
  RunTestIf(DOWN_TESTS, qa_instance.TestInstanceShutdown, instance)

  # now run the 'down' state tests
  RunTestIf("instance-reinstall", qa_instance.TestInstanceReinstall, instance)
  RunTestIf(["instance-reinstall", qa_rapi.Enabled],
            qa_rapi.TestRapiInstanceReinstall, instance)

  if qa_config.TestEnabled("instance-rename"):
    tgt_instance = qa_config.AcquireInstance()
    try:
      rename_source = instance.name
      rename_target = tgt_instance.name
      # perform instance rename to the same name
      RunTest(qa_instance.TestInstanceRenameAndBack,
              rename_source, rename_source)
      RunTestIf(qa_rapi.Enabled, qa_rapi.TestRapiInstanceRenameAndBack,
                rename_source, rename_source)
      if rename_target is not None:
        # perform instance rename to a different name, if we have one configured
        RunTest(qa_instance.TestInstanceRenameAndBack,
                rename_source, rename_target)
        RunTestIf(qa_rapi.Enabled, qa_rapi.TestRapiInstanceRenameAndBack,
                  rename_source, rename_target)
    finally:
      tgt_instance.Release()

  RunTestIf(["instance-grow-disk"], qa_instance.TestInstanceGrowDisk, instance)

  # and now start the instance again
  RunTestIf(DOWN_TESTS, qa_instance.TestInstanceStartup, instance)

  RunTestIf("instance-reboot", qa_instance.TestInstanceReboot, instance)

  RunTestIf("tags", qa_tags.TestInstanceTags, instance)

  if instance.disk_template == constants.DT_DRBD8:
    RunTestIf("cluster-verify",
              qa_cluster.TestClusterVerifyDisksBrokenDRBD, instance, inst_nodes)
  RunTestIf("cluster-verify", qa_cluster.TestClusterVerify)

  RunTestIf(qa_rapi.Enabled, qa_rapi.TestInstance, instance)

  # Lists instances, too
  RunTestIf("node-list", qa_node.TestNodeList)

  # Some jobs have been run, let's test listing them
  RunTestIf("job-list", qa_job.TestJobList)
|
403 |
|
404 |
|
405 |
def RunCommonNodeTests():
  """Run a few common node tests.

  """
  node_checks = [
    ("node-volumes", qa_node.TestNodeVolumes),
    ("node-storage", qa_node.TestNodeStorage),
    (["node-oob", qa_config.NoVirtualCluster], qa_node.TestOutOfBand),
    ]
  for testnames, fn in node_checks:
    RunTestIf(testnames, fn)
|
412 |
|
413 |
|
414 |
def RunGroupListTests():
  """Run tests for listing node groups.

  """
  for fn in (qa_group.TestGroupList, qa_group.TestGroupListFields):
    RunTestIf("group-list", fn)
|
420 |
|
421 |
|
422 |
def RunNetworkTests():
  """Run tests for network management.

  """
  for fn in (qa_network.TestNetworkAddRemove, qa_network.TestNetworkConnect):
    RunTestIf("network", fn)
|
428 |
|
429 |
|
430 |
def RunGroupRwTests():
  """Run tests for adding/removing/renaming groups.

  """
  for fn in (qa_group.TestGroupAddRemoveRename,
             qa_group.TestGroupAddWithOptions,
             qa_group.TestGroupModify):
    RunTestIf("group-rwops", fn)
  RunTestIf(["group-rwops", qa_rapi.Enabled], qa_rapi.TestRapiNodeGroups)
  RunTestIf(["group-rwops", "tags"], qa_tags.TestGroupTags,
            qa_group.GetDefaultGroup())
440 |
|
441 |
|
442 |
def RunExportImportTests(instance, inodes):
  """Tries to export and import the instance.

  Also exercises the inter-cluster instance move via RAPI when enabled.

  @param instance: instance object to export/import
  @type inodes: list of nodes
  @param inodes: current nodes of the instance

  """
  # FIXME: export explicitly bails out on file based storage. other non-lvm
  # based storage types are untested, though. Also note that import could still
  # work, but is deeply embedded into the "export" case.
  if (qa_config.TestEnabled("instance-export") and
      instance.disk_template not in constants.DTS_FILEBASED):
    RunTest(qa_instance.TestInstanceExportNoTarget, instance)

    pnode = inodes[0]
    # Export must go to a node other than the instance's primary
    expnode = qa_config.AcquireNode(exclude=pnode)
    try:
      name = RunTest(qa_instance.TestInstanceExport, instance, expnode)

      RunTest(qa_instance.TestBackupList, expnode)

      if qa_config.TestEnabled("instance-import"):
        newinst = qa_config.AcquireInstance()
        try:
          RunTest(qa_instance.TestInstanceImport, newinst, pnode,
                  expnode, name)
          # Check if starting the instance works
          RunTest(qa_instance.TestInstanceStartup, newinst)
          RunTest(qa_instance.TestInstanceRemove, newinst)
        finally:
          newinst.Release()
    finally:
      expnode.Release()

  # FIXME: inter-cluster-instance-move crashes on file based instances :/
  # See Issue 414.
  if (qa_config.TestEnabled([qa_rapi.Enabled, "inter-cluster-instance-move"])
      and (instance.disk_template not in constants.DTS_FILEBASED)):
    newinst = qa_config.AcquireInstance()
    try:
      tnode = qa_config.AcquireNode(exclude=inodes)
      try:
        RunTest(qa_rapi.TestInterClusterInstanceMove, instance, newinst,
                inodes, tnode)
      finally:
        tnode.Release()
    finally:
      newinst.Release()
490 |
|
491 |
|
492 |
def RunDaemonTests(instance):
  """Test the ganeti-watcher script.

  The watcher is paused while the restart tests run and resumed again
  afterwards.

  @param instance: instance used by the watcher-driven tests

  """
  RunTest(qa_daemon.TestPauseWatcher)

  watcher_checks = [
    ("instance-automatic-restart", qa_daemon.TestInstanceAutomaticRestart),
    ("instance-consecutive-failures",
     qa_daemon.TestInstanceConsecutiveFailures),
    ]
  for testname, fn in watcher_checks:
    RunTestIf(testname, fn, instance)

  RunTest(qa_daemon.TestResumeWatcher)
504 |
|
505 |
|
506 |
def RunHardwareFailureTests(instance, inodes):
  """Test cluster internal hardware failure recovery.

  @param instance: instance object to exercise
  @param inodes: current nodes of the instance

  """
  RunTestIf("instance-failover", qa_instance.TestInstanceFailover, instance)
  RunTestIf(["instance-failover", qa_rapi.Enabled],
            qa_rapi.TestRapiInstanceFailover, instance)

  RunTestIf("instance-migrate", qa_instance.TestInstanceMigrate, instance)
  RunTestIf(["instance-migrate", qa_rapi.Enabled],
            qa_rapi.TestRapiInstanceMigrate, instance)

  if qa_config.TestEnabled("instance-replace-disks"):
    # We just need alternative secondary nodes, hence "- 1"
    othernodes = qa_config.AcquireManyNodes(len(inodes) - 1, exclude=inodes)
    try:
      RunTestIf(qa_rapi.Enabled, qa_rapi.TestRapiInstanceReplaceDisks, instance)
      RunTest(qa_instance.TestReplaceDisks,
              instance, inodes, othernodes)
    finally:
      qa_config.ReleaseManyNodes(othernodes)
    del othernodes

  if qa_config.TestEnabled("instance-recreate-disks"):
    try:
      acquirednodes = qa_config.AcquireManyNodes(len(inodes), exclude=inodes)
      othernodes = acquirednodes
    except qa_error.OutOfNodesError:
      if len(inodes) > 1:
        # If the cluster is not big enough, let's reuse some of the nodes, but
        # with different roles. In this way, we can test a DRBD instance even on
        # a 3-node cluster.
        acquirednodes = [qa_config.AcquireNode(exclude=inodes)]
        othernodes = acquirednodes + inodes[:-1]
      else:
        raise
    try:
      RunTest(qa_instance.TestRecreateDisks,
              instance, inodes, othernodes)
    finally:
      # Only the freshly-acquired nodes are released; reused instance
      # nodes stay acquired by the caller
      qa_config.ReleaseManyNodes(acquirednodes)

  if len(inodes) >= 2:
    RunTestIf("node-evacuate", qa_node.TestNodeEvacuate, inodes[0], inodes[1])
    RunTestIf("node-failover", qa_node.TestNodeFailover, inodes[0], inodes[1])
    RunTestIf("node-migrate", qa_node.TestNodeMigrate, inodes[0], inodes[1])
552 |
|
553 |
|
554 |
def RunExclusiveStorageTests():
  """Test exclusive storage.

  Toggles the exclusive-storage cluster flag around plain- and
  DRBD-disk instance creation; the original setting is restored at the
  end.

  """
  if not qa_config.TestEnabled("cluster-exclusive-storage"):
    return

  node = qa_config.AcquireNode()
  try:
    # Remember the previous setting so it can be restored below
    old_es = qa_cluster.TestSetExclStorCluster(False)
    qa_node.TestExclStorSingleNode(node)

    qa_cluster.TestSetExclStorCluster(True)
    qa_cluster.TestExclStorSharedPv(node)

    if qa_config.TestEnabled("instance-add-plain-disk"):
      # Make sure that the cluster doesn't have any pre-existing problem
      qa_cluster.AssertClusterVerify()

      # Create and allocate instances
      instance1 = qa_instance.TestInstanceAddWithPlainDisk([node])
      try:
        instance2 = qa_instance.TestInstanceAddWithPlainDisk([node])
        try:
          # cluster-verify checks that disks are allocated correctly
          qa_cluster.AssertClusterVerify()

          # Remove instances
          qa_instance.TestInstanceRemove(instance2)
          qa_instance.TestInstanceRemove(instance1)
        finally:
          instance2.Release()
      finally:
        instance1.Release()

    if qa_config.TestEnabled("instance-add-drbd-disk"):
      snode = qa_config.AcquireNode()
      try:
        # DRBD instance created while exclusive storage is off ...
        qa_cluster.TestSetExclStorCluster(False)
        instance = qa_instance.TestInstanceAddWithDrbdDisk([node, snode])
        try:
          # ... must be flagged by cluster-verify once it is on again
          qa_cluster.TestSetExclStorCluster(True)
          exp_err = [constants.CV_EINSTANCEUNSUITABLENODE]
          qa_cluster.AssertClusterVerify(fail=True, errors=exp_err)
          qa_instance.TestInstanceRemove(instance)
        finally:
          instance.Release()
      finally:
        snode.Release()
    qa_cluster.TestSetExclStorCluster(old_es)
  finally:
    node.Release()
604 |
|
605 |
|
606 |
def RunCustomSshPortTests():
  """Test accessing nodes with custom SSH ports.

  This requires removing nodes, adding them to a new group, and then undoing
  the change.
  """
  if not qa_config.TestEnabled("group-custom-ssh-port"):
    return

  # Arbitrary non-default SSH port used by the temporary group
  port = 211
  master = qa_config.GetMasterNode()
  with qa_config.AcquireManyNodesCtx(1, exclude=master) as nodes:
    for node in nodes:
      qa_node.NodeRemove(node)
    with qa_iptables.RulesContext(nodes) as r:
      with qa_group.NewGroupCtx() as group:
        qa_group.ModifyGroupSshPort(r, group, nodes, port)

        for node in nodes:
          qa_node.NodeAdd(node, group=group)

        # Make sure that the cluster doesn't have any pre-existing problem
        qa_cluster.AssertClusterVerify()

        # Create and allocate instances
        instance1 = qa_instance.TestInstanceAddWithPlainDisk(nodes)
        try:
          instance2 = qa_instance.TestInstanceAddWithPlainDisk(nodes)
          try:
            # cluster-verify checks that disks are allocated correctly
            qa_cluster.AssertClusterVerify()

            # Remove instances
            qa_instance.TestInstanceRemove(instance2)
            qa_instance.TestInstanceRemove(instance1)
          finally:
            instance2.Release()
        finally:
          instance1.Release()

        for node in nodes:
          qa_node.NodeRemove(node)

    # Undo the change: re-add the nodes to their original group
    for node in nodes:
      qa_node.NodeAdd(node)

    qa_cluster.AssertClusterVerify()
653 |
|
654 |
|
655 |
def _BuildSpecDict(par, mn, st, mx):
  """Build an instance-specs dict with a single min/max bound.

  @param par: parameter name to limit
  @param mn: minimum value
  @param st: standard value
  @param mx: maximum value
  @return: dict suitable for cluster ispecs updates

  """
  bound = {
    constants.ISPECS_MIN: {par: mn},
    constants.ISPECS_MAX: {par: mx},
    }
  return {
    constants.ISPECS_MINMAX: [bound],
    constants.ISPECS_STD: {par: st},
    }
663 |
|
664 |
|
665 |
def _BuildDoubleSpecDict(index, par, mn, st, mx):
  """Build an instance-specs dict with two min/max bounds, one populated.

  @param index: which of the two bounds (0 or 1) receives the limits
  @param par: parameter name to limit
  @param mn: minimum value
  @param st: standard value, or None to omit the std spec entirely
  @param mx: maximum value
  @return: dict suitable for cluster ispecs updates

  """
  bounds = [{}, {}]
  bounds[index] = {
    constants.ISPECS_MIN: {par: mn},
    constants.ISPECS_MAX: {par: mx},
    }
  result = {
    constants.ISPECS_MINMAX: bounds,
    }
  if st is not None:
    result[constants.ISPECS_STD] = {par: st}
  return result
|
676 |
|
677 |
|
678 |
def TestIPolicyPlainInstance():
  """Test instance policy interaction with instances.

  Builds a history of ispecs changes that alternately violate and
  satisfy the policy for an existing plain-disk instance, applies them
  while checking cluster-verify, then replays the same history and
  checks that instance creation fails exactly where a violation was
  expected.

  """
  params = ["memory-size", "cpu-count", "disk-count", "disk-size", "nic-count"]
  if not qa_config.IsTemplateSupported(constants.DT_PLAIN):
    print "Template %s not supported" % constants.DT_PLAIN
    return

  # This test assumes that the group policy is empty
  (_, old_specs) = qa_cluster.TestClusterSetISpecs()
  # We also assume to have only one min/max bound
  assert len(old_specs[constants.ISPECS_MINMAX]) == 1
  node = qa_config.AcquireNode()
  try:
    # Log of policy changes, list of tuples:
    # (full_change, incremental_change, policy_violated)
    history = []
    instance = qa_instance.TestInstanceAddWithPlainDisk([node])
    try:
      policyerror = [constants.CV_EINSTANCEPOLICY]
      for par in params:
        (iminval, imaxval) = qa_instance.GetInstanceSpec(instance.name, par)
        # Some specs must be multiple of 4
        new_spec = _BuildSpecDict(par, imaxval + 4, imaxval + 4, imaxval + 4)
        history.append((None, new_spec, True))
        if iminval > 0:
          # Some specs must be multiple of 4
          if iminval >= 4:
            upper = iminval - 4
          else:
            upper = iminval - 1
          new_spec = _BuildSpecDict(par, 0, upper, upper)
          history.append((None, new_spec, True))
        history.append((old_specs, None, False))

      # Test with two instance specs
      double_specs = copy.deepcopy(old_specs)
      double_specs[constants.ISPECS_MINMAX] = \
        double_specs[constants.ISPECS_MINMAX] * 2
      (par1, par2) = params[0:2]
      (_, imaxval1) = qa_instance.GetInstanceSpec(instance.name, par1)
      (_, imaxval2) = qa_instance.GetInstanceSpec(instance.name, par2)
      old_minmax = old_specs[constants.ISPECS_MINMAX][0]
      history.extend([
        (double_specs, None, False),
        # The first min/max limit is being violated
        (None,
         _BuildDoubleSpecDict(0, par1, imaxval1 + 4, imaxval1 + 4,
                              imaxval1 + 4),
         False),
        # Both min/max limits are being violated
        (None,
         _BuildDoubleSpecDict(1, par2, imaxval2 + 4, None, imaxval2 + 4),
         True),
        # The second min/max limit is being violated
        (None,
         _BuildDoubleSpecDict(0, par1,
                              old_minmax[constants.ISPECS_MIN][par1],
                              old_specs[constants.ISPECS_STD][par1],
                              old_minmax[constants.ISPECS_MAX][par1]),
         False),
        (old_specs, None, False),
        ])

      # Apply the changes, and check policy violations after each change
      qa_cluster.AssertClusterVerify()
      for (new_specs, diff_specs, failed) in history:
        qa_cluster.TestClusterSetISpecs(new_specs=new_specs,
                                        diff_specs=diff_specs)
        if failed:
          qa_cluster.AssertClusterVerify(warnings=policyerror)
        else:
          qa_cluster.AssertClusterVerify()

      qa_instance.TestInstanceRemove(instance)
    finally:
      instance.Release()

    # Now we replay the same policy changes, and we expect that the instance
    # cannot be created for the cases where we had a policy violation above
    for (new_specs, diff_specs, failed) in history:
      qa_cluster.TestClusterSetISpecs(new_specs=new_specs,
                                      diff_specs=diff_specs)
      if failed:
        qa_instance.TestInstanceAddWithPlainDisk([node], fail=True)
      # Instance creation with no policy violation has been tested already
  finally:
    node.Release()
765 |
|
766 |
|
767 |
def IsExclusiveStorageInstanceTestEnabled():
  """Whether the exclusive-storage instance tests should run.

  Besides the configuration flag, checks that every node has at least
  two physical volumes in the configured volume group, as required by
  those tests.

  @return: True if the tests are enabled and every node has enough PVs
  @raise qa_error.Error: if the PV count cannot be determined for a
      node, or a node has fewer than two PVs

  """
  test_name = "exclusive-storage-instance-tests"
  if qa_config.TestEnabled(test_name):
    vgname = qa_config.get("vg-name", constants.DEFAULT_VG)
    vgscmd = utils.ShellQuoteArgs([
      "vgs", "--noheadings", "-o", "pv_count", vgname,
      ])
    nodes = qa_config.GetConfig()["nodes"]
    for node in nodes:
      try:
        pvnum = int(qa_utils.GetCommandOutput(node.primary, vgscmd))
      except Exception, e:
        msg = ("Cannot get the number of PVs on %s, needed by '%s': %s" %
               (node.primary, test_name, e))
        raise qa_error.Error(msg)
      if pvnum < 2:
        raise qa_error.Error("Node %s has not enough PVs (%s) to run '%s'" %
                             (node.primary, pvnum, test_name))
    res = True
  else:
    res = False
  return res
|
789 |
|
790 |
|
791 |
def RunInstanceTests():
  """Create and exercise instances.

  Iterates over all configured instance creation tests (one per disk
  template), creates the instance, runs the full set of per-instance
  test blocks on it and removes it again.

  """

  for (test_name, templ, create_fun, num_nodes) in \
      qa_instance.available_instance_tests:
    if (qa_config.TestEnabled(test_name) and
        qa_config.IsTemplateSupported(templ)):
      inodes = qa_config.AcquireManyNodes(num_nodes)
      try:
        instance = RunTest(create_fun, inodes)
        try:
          RunTestIf("instance-user-down", qa_instance.TestInstanceUserDown,
                    instance, qa_config.GetMasterNode())
          RunTestIf("cluster-epo", qa_cluster.TestClusterEpo)
          RunDaemonTests(instance)
          for node in inodes:
            RunTestIf("haskell-confd", qa_node.TestNodeListDrbd, node)
          if len(inodes) > 1:
            RunTestIf("group-rwops", qa_group.TestAssignNodesIncludingSplit,
                      constants.INITIAL_NODE_GROUP_NAME,
                      inodes[0].primary, inodes[1].primary)
          if qa_config.TestEnabled("instance-convert-disk"):
            # The instance must be stopped for the disk conversion
            RunTest(qa_instance.TestInstanceShutdown, instance)
            RunTest(qa_instance.TestInstanceConvertDiskToPlain,
                    instance, inodes)
            RunTest(qa_instance.TestInstanceStartup, instance)
          RunTestIf("instance-modify-disks",
                    qa_instance.TestInstanceModifyDisks, instance)
          RunCommonInstanceTests(instance, inodes)
          if qa_config.TestEnabled("instance-modify-primary"):
            othernode = qa_config.AcquireNode()
            RunTest(qa_instance.TestInstanceModifyPrimaryAndBack,
                    instance, inodes[0], othernode)
            othernode.Release()
          RunGroupListTests()
          RunExportImportTests(instance, inodes)
          RunHardwareFailureTests(instance, inodes)
          RunRepairDiskSizes()
          RunTest(qa_instance.TestInstanceRemove, instance)
        finally:
          instance.Release()
        del instance
      finally:
        qa_config.ReleaseManyNodes(inodes)
      qa_cluster.AssertClusterVerify()
836 |
|
837 |
|
838 |
def RunMonitoringTests():
  """Run tests for the monitoring daemon.

  Note: intentionally silent (no "skipping" banner) when disabled.

  """
  if not qa_config.TestEnabled("mon-collector"):
    return
  RunTest(qa_monitoring.TestInstStatusCollector)
841 |
|
842 |
|
843 |
def RunQa():
  """Main QA body.

  Runs the whole QA sequence in order: environment tests, cluster setup,
  cluster/OS tests, node and group tests, instance tests (once per enabled
  configuration), export/import, monitoring, and finally cluster teardown.
  Individual steps are gated on the QA configuration via RunTestIf or
  explicit qa_config.TestEnabled() checks.

  """
  rapi_user = "ganeti-qa"

  RunTestBlock(RunEnvTests)
  # The returned secret is later used to authenticate the QA RAPI user
  # (see qa_rapi.Setup below)
  rapi_secret = SetupCluster(rapi_user)

  if qa_rapi.Enabled():
    # Load RAPI certificate
    qa_rapi.Setup(rapi_user, rapi_secret)

  RunTestBlock(RunClusterTests)
  RunTestBlock(RunOsTests)

  RunTestIf("tags", qa_tags.TestClusterTags)

  RunTestBlock(RunCommonNodeTests)
  RunTestBlock(RunGroupListTests)
  RunTestBlock(RunGroupRwTests)
  RunTestBlock(RunNetworkTests)

  # The master shouldn't be readded or put offline; "delay" needs a non-master
  # node to test
  pnode = qa_config.AcquireNode(exclude=qa_config.GetMasterNode())
  try:
    RunTestIf("node-readd", qa_node.TestNodeReadd, pnode)
    RunTestIf("node-modify", qa_node.TestNodeModify, pnode)
    RunTestIf("delay", qa_cluster.TestDelay, pnode)
  finally:
    # Always return the node to the pool, even if a test failed
    pnode.Release()

  # Make sure the cluster is clean before running instance tests
  qa_cluster.AssertClusterVerify()

  pnode = qa_config.AcquireNode()
  try:
    RunTestIf("tags", qa_tags.TestNodeTags, pnode)

    if qa_rapi.Enabled():
      RunTest(qa_rapi.TestNode, pnode)

    # RAPI-based instance creation is tested both through the RAPI client
    # library and without it (use_client=False)
    if (qa_config.TestEnabled("instance-add-plain-disk")
        and qa_config.IsTemplateSupported(constants.DT_PLAIN)):
      for use_client in [True, False]:
        rapi_instance = RunTest(qa_rapi.TestRapiInstanceAdd, pnode,
                                use_client)
        try:
          if qa_config.TestEnabled("instance-plain-rapi-common-tests"):
            RunCommonInstanceTests(rapi_instance, [pnode])
          RunTest(qa_rapi.TestRapiInstanceRemove, rapi_instance, use_client)
        finally:
          rapi_instance.Release()
        del rapi_instance

  finally:
    pnode.Release()

  # Each entry is (test name or predicate, setup function, restore function);
  # the instance test block is run once per enabled configuration, with the
  # setup function's return value passed back to the restore function
  config_list = [
    ("default-instance-tests", lambda: None, lambda _: None),
    (IsExclusiveStorageInstanceTestEnabled,
     lambda: qa_cluster.TestSetExclStorCluster(True),
     qa_cluster.TestSetExclStorCluster),
    ]
  for (conf_name, setup_conf_f, restore_conf_f) in config_list:
    # conf_name is either a plain test name or a callable predicate here
    if qa_config.TestEnabled(conf_name):
      oldconf = setup_conf_f()
      RunTestBlock(RunInstanceTests)
      restore_conf_f(oldconf)

  pnode = qa_config.AcquireNode()
  try:
    if qa_config.TestEnabled(["instance-add-plain-disk", "instance-export"]):
      # Run the export test twice: once with a running instance and once
      # after shutting it down first
      for shutdown in [False, True]:
        instance = RunTest(qa_instance.TestInstanceAddWithPlainDisk, [pnode])
        try:
          expnode = qa_config.AcquireNode(exclude=pnode)
          try:
            if shutdown:
              # Stop instance before exporting and removing it
              RunTest(qa_instance.TestInstanceShutdown, instance)
            RunTest(qa_instance.TestInstanceExportWithRemove, instance, expnode)
            RunTest(qa_instance.TestBackupList, expnode)
          finally:
            expnode.Release()
        finally:
          instance.Release()
        del expnode
        del instance
      qa_cluster.AssertClusterVerify()

  finally:
    pnode.Release()

  RunTestIf("cluster-upgrade", qa_cluster.TestUpgrade)

  RunTestBlock(RunExclusiveStorageTests)
  RunTestIf(["cluster-instance-policy", "instance-add-plain-disk"],
            TestIPolicyPlainInstance)

  RunTestBlock(RunCustomSshPortTests)

  RunTestIf(
    "instance-add-restricted-by-disktemplates",
    qa_instance.TestInstanceCreationRestrictedByDiskTemplates)

  # Test removing instance with offline drbd secondary
  if qa_config.TestEnabled(["instance-remove-drbd-offline",
                            "instance-add-drbd-disk"]):
    # Make sure the master is not put offline
    snode = qa_config.AcquireNode(exclude=qa_config.GetMasterNode())
    try:
      pnode = qa_config.AcquireNode(exclude=snode)
      try:
        instance = qa_instance.TestInstanceAddWithDrbdDisk([pnode, snode])
        set_offline = lambda node: qa_node.MakeNodeOffline(node, "yes")
        set_online = lambda node: qa_node.MakeNodeOffline(node, "no")
        RunTest(qa_instance.TestRemoveInstanceOfflineNode, instance, snode,
                set_offline, set_online)
      finally:
        pnode.Release()
    finally:
      snode.Release()
    qa_cluster.AssertClusterVerify()

  RunTestBlock(RunMonitoringTests)

  RunTestIf("create-cluster", qa_node.TestNodeRemoveAll)

  RunTestIf("cluster-destroy", qa_cluster.TestClusterDestroy)
|
974 |
|
975 |
|
976 |
@UsesRapiClient
def main():
  """Main program.

  Parses the command line, loads the QA configuration, prints the SSH
  commands used to reach the nodes, and runs the QA suite while keeping
  SSH multiplexers open for the duration of the run.

  """
  colors.check_for_colors()

  arg_parser = optparse.OptionParser(usage="%prog [options] <config-file>")
  arg_parser.add_option("--yes-do-it", dest="yes_do_it",
                        action="store_true",
                        help="Really execute the tests")
  (options, arguments) = arg_parser.parse_args()

  # Exactly one positional argument (the QA configuration file) is expected;
  # OptionParser.error() never returns (it exits the program)
  if len(arguments) != 1:
    arg_parser.error("Wrong number of arguments.")
  config_file = arguments[0]

  # Refuse to run without explicit confirmation, as QA destroys the cluster
  if not options.yes_do_it:
    print ("Executing this script irreversibly destroys any Ganeti\n"
           "configuration on all nodes involved. If you really want\n"
           "to start testing, supply the --yes-do-it option.")
    sys.exit(1)

  qa_config.Load(config_file)

  primary = qa_config.GetMasterNode().primary
  qa_utils.StartMultiplexer(primary)
  print ("SSH command for primary node: %s" %
         utils.ShellQuoteArgs(qa_utils.GetSSHCommand(primary, "")))
  print ("SSH command for other nodes: %s" %
         utils.ShellQuoteArgs(qa_utils.GetSSHCommand("NODE", "")))
  try:
    RunQa()
  finally:
    # Tear down the SSH multiplexers even if QA failed
    qa_utils.CloseMultiplexers()

if __name__ == "__main__":
  main()