root / qa / qa_instance.py @ 9bd52012
History | View | Annotate | Download (49.4 kB)
1 |
#
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2007, 2011, 2012, 2013 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
|
22 |
"""Instance related QA tests.
|
23 |
|
24 |
"""
|
25 |
|
26 |
import os |
27 |
import re |
28 |
import time |
29 |
|
30 |
from ganeti import utils |
31 |
from ganeti import constants |
32 |
from ganeti import pathutils |
33 |
from ganeti import query |
34 |
from ganeti.netutils import IP4Address |
35 |
|
36 |
import qa_config |
37 |
import qa_daemon |
38 |
import qa_utils |
39 |
import qa_error |
40 |
|
41 |
from qa_utils import AssertCommand, AssertEqual, AssertIn |
42 |
from qa_utils import InstanceCheck, INST_DOWN, INST_UP, FIRST_ARG, RETURN_VALUE |
43 |
from qa_instance_utils import CheckSsconfInstanceList, \ |
44 |
CreateInstanceDrbd8, \
|
45 |
CreateInstanceByDiskTemplate, \
|
46 |
CreateInstanceByDiskTemplateOneNode, \
|
47 |
GetGenericAddParameters
|
48 |
|
49 |
|
50 |
def _GetDiskStatePath(disk): |
51 |
return "/sys/block/%s/device/state" % disk |
52 |
|
53 |
|
54 |
def GetInstanceInfo(instance):
  """Return information about the actual state of an instance.

  Parses the textual output of C{gnt-instance info} (via
  L{qa_utils.GetObjectInfo}) into a structured dictionary.

  @type instance: string
  @param instance: the instance name
  @return: a dictionary with the following keys:
      - "nodes": instance nodes, a list of strings
      - "volumes": instance volume IDs, a list of strings
      - "drbd-minors": DRBD minors used by the instance, a dictionary where
        keys are nodes, and values are lists of integers (or an empty
        dictionary for non-DRBD instances)
      - "disk-template": instance disk template
      - "storage-type": storage type associated with the instance disk template
  @raise qa_error.Error: if the disk template is missing or DRBD minor
      information cannot be parsed

  """
  # Matches a single node entry, optionally followed by a parenthesised
  # group annotation, e.g. "node4.fqdn (group mygroup, ...)"
  node_elem = r"([^,()]+)(?:\s+\([^)]+\))?"
  # re_nodelist matches a list of nodes returned by gnt-instance info, e.g.:
  #  node1.fqdn
  #  node2.fqdn,node3.fqdn
  #  node4.fqdn (group mygroup, group UUID 01234567-abcd-0123-4567-0123456789ab)
  # FIXME This works with no more than 2 secondaries
  re_nodelist = re.compile(node_elem + "(?:," + node_elem + ")?$")

  info = qa_utils.GetObjectInfo(["gnt-instance", "info", instance])[0]
  nodes = []
  for nodeinfo in info["Nodes"]:
    if "primary" in nodeinfo:
      nodes.append(nodeinfo["primary"])
    elif "secondaries" in nodeinfo:
      nodestr = nodeinfo["secondaries"]
      if nodestr:
        m = re_nodelist.match(nodestr)
        if m:
          # Unmatched optional groups are None; drop them
          nodes.extend(filter(None, m.groups()))
        else:
          # Fall back to the raw string if the pattern did not match
          nodes.append(nodestr)

  disk_template = info["Disk template"]
  if not disk_template:
    raise qa_error.Error("Can't get instance disk template")
  storage_type = constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[disk_template]

  # Matches e.g. "node1.fqdn, minor=3" as printed for DRBD nodeA/nodeB
  re_drbdnode = re.compile(r"^([^\s,]+),\s+minor=([0-9]+)$")
  vols = []
  drbd_min = {}
  for (count, diskinfo) in enumerate(info["Disks"]):
    # The "disk/N" value starts with the disk type, e.g. "drbd8, ..."
    (dtype, _) = diskinfo["disk/%s" % count].split(",", 1)
    if dtype == constants.DT_DRBD8:
      for child in diskinfo["child devices"]:
        vols.append(child["logical_id"])
      for key in ["nodeA", "nodeB"]:
        m = re_drbdnode.match(diskinfo[key])
        if not m:
          raise qa_error.Error("Cannot parse DRBD info: %s" % diskinfo[key])
        node = m.group(1)
        minor = int(m.group(2))
        minorlist = drbd_min.setdefault(node, [])
        minorlist.append(minor)
    elif dtype == constants.DT_PLAIN:
      vols.append(diskinfo["logical_id"])
    # NOTE(review): other disk types (file, diskless, ...) contribute no
    # volumes here; "volumes" is only meaningful for lvm-backed templates

  assert nodes
  # Instances with a secondary node must have reported some volumes
  assert len(nodes) < 2 or vols
  return {
    "nodes": nodes,
    "volumes": vols,
    "drbd-minors": drbd_min,
    "disk-template": disk_template,
    "storage-type": storage_type,
    }
124 |
|
125 |
|
126 |
def _DestroyInstanceDisks(instance):
  """Remove all the backend disks of an instance.

  This is used to simulate HW errors (dead nodes, broken disks...); the
  configuration of the instance is not affected.
  @type instance: dictionary
  @param instance: the instance

  """
  info = GetInstanceInfo(instance.name)
  # FIXME: destruction/removal should be part of the disk class
  if info["storage-type"] == constants.ST_LVM_VG:
    # Remove the logical volumes on every node of the instance
    vols = info["volumes"]
    for node in info["nodes"]:
      AssertCommand(["lvremove", "-f"] + vols, node=node)
  elif info["storage-type"] in (constants.ST_FILE, constants.ST_SHARED_FILE):
    # Note that this works for both file and sharedfile, and this is intended.
    storage_dir = qa_config.get("file-storage-dir",
                                pathutils.DEFAULT_FILE_STORAGE_DIR)
    # File-based disks live in a per-instance directory under the storage dir
    idir = os.path.join(storage_dir, instance.name)
    for node in info["nodes"]:
      AssertCommand(["rm", "-rf", idir], node=node)
  elif info["storage-type"] == constants.ST_DISKLESS:
    pass
  # NOTE(review): storage types not listed above are silently ignored —
  # presumably intentional best-effort behaviour; confirm before relying on it
|
150 |
|
151 |
|
152 |
def _GetInstanceField(instance, field):
  """Get the value of a field of an instance.

  Runs "gnt-instance list" on the master node and returns the stripped
  single-field output.

  @type instance: string
  @param instance: Instance name
  @type field: string
  @param field: Name of the field
  @rtype: string

  """
  cmd = ["gnt-instance", "list", "--no-headers",
         "--units", "m", "-o", field, instance]
  quoted = utils.ShellQuoteArgs(cmd)
  output = qa_utils.GetCommandOutput(qa_config.GetMasterNode().primary, quoted)
  return output.strip()
|
166 |
|
167 |
|
168 |
def _GetBoolInstanceField(instance, field):
  """Get the Boolean value of a field of an instance.

  @type instance: string
  @param instance: Instance name
  @type field: string
  @param field: Name of the field
  @rtype: bool
  @raise qa_error.Error: if the field value is neither "Y" nor "N"

  """
  value = _GetInstanceField(instance, field)
  # gnt-instance prints Booleans as a single "Y"/"N" character
  mapping = {"Y": True, "N": False}
  if value in mapping:
    return mapping[value]
  raise qa_error.Error("Field %s of instance %s has a non-Boolean value:"
                       " %s" % (field, instance, value))
|
186 |
|
187 |
|
188 |
def _GetNumInstanceField(instance, field):
  """Get a numeric value of a field of an instance.

  @type instance: string
  @param instance: Instance name
  @type field: string
  @param field: Name of the field
  @rtype: int or float
  @raise qa_error.Error: if the field value is not numeric

  """
  value = _GetInstanceField(instance, field)
  # Prefer int, fall back to float, exactly as the original nested try did
  for convert in (int, float):
    try:
      return convert(value)
    except ValueError:
      pass
  raise qa_error.Error("Field %s of instance %s has a non-numeric value:"
                       " %s" % (field, instance, value))
|
208 |
|
209 |
|
210 |
def GetInstanceSpec(instance, spec):
  """Return the current spec for the given parameter.

  @type instance: string
  @param instance: Instance name
  @type spec: string
  @param spec: one of the supported parameters: "memory-size", "cpu-count",
      "disk-count", "disk-size", "nic-count"
  @rtype: tuple
  @return: (minspec, maxspec); minspec and maxspec can be different only for
      memory and disk size

  """
  specmap = {
    "memory-size": ["be/minmem", "be/maxmem"],
    "cpu-count": ["vcpus"],
    "disk-count": ["disk.count"],
    "disk-size": ["disk.size/ "],
    "nic-count": ["nic.count"],
    }
  if spec == "disk-size":
    # Disk sizes are per-disk fields, so first query the number of disks
    (numdisk, _) = GetInstanceSpec(instance, "disk-count")
    fields = ["disk.size/%s" % k for k in range(0, numdisk)]
  else:
    assert spec in specmap, "%s not in %s" % (spec, specmap)
    fields = specmap[spec]
  values = []
  for fname in fields:
    values.append(_GetNumInstanceField(instance, fname))
  return (min(values), max(values))
239 |
|
240 |
|
241 |
def IsFailoverSupported(instance):
  """Return whether the instance's disk template allows failover."""
  template = instance.disk_template
  return template in constants.DTS_MIRRORED
243 |
|
244 |
|
245 |
def IsMigrationSupported(instance):
  """Return whether the instance's disk template allows migration."""
  template = instance.disk_template
  return template in constants.DTS_MIRRORED
247 |
|
248 |
|
249 |
def IsDiskReplacingSupported(instance):
  """Return whether disk replacement works for the instance (DRBD only)."""
  return constants.DT_DRBD8 == instance.disk_template
|
251 |
|
252 |
|
253 |
def IsDiskSupported(instance):
  """Return whether the instance has disks at all (i.e. not diskless)."""
  return not instance.disk_template == constants.DT_DISKLESS
|
255 |
|
256 |
|
257 |
def TestInstanceAddWithPlainDisk(nodes, fail=False):
  """gnt-instance add -t plain"""
  # Skip entirely when the plain template is not enabled in this QA run
  if constants.DT_PLAIN not in qa_config.GetEnabledDiskTemplates():
    return None
  instance = CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_PLAIN,
                                                 fail=fail)
  if not fail:
    qa_utils.RunInstanceCheck(instance, True)
  return instance
|
265 |
|
266 |
|
267 |
@InstanceCheck(None, INST_UP, RETURN_VALUE)
def TestInstanceAddWithDrbdDisk(nodes):
  """gnt-instance add -t drbd"""
  enabled = qa_config.GetEnabledDiskTemplates()
  if constants.DT_DRBD8 not in enabled:
    return None
  return CreateInstanceDrbd8(nodes)
|
272 |
|
273 |
|
274 |
@InstanceCheck(None, INST_UP, RETURN_VALUE)
def TestInstanceAddFile(nodes):
  """gnt-instance add -t file"""
  assert len(nodes) == 1
  enabled = qa_config.GetEnabledDiskTemplates()
  if constants.DT_FILE not in enabled:
    return None
  return CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_FILE)
|
280 |
|
281 |
|
282 |
@InstanceCheck(None, INST_UP, RETURN_VALUE)
def TestInstanceAddSharedFile(nodes):
  """gnt-instance add -t sharedfile"""
  assert len(nodes) == 1
  enabled = qa_config.GetEnabledDiskTemplates()
  if constants.DT_SHARED_FILE not in enabled:
    return None
  return CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_SHARED_FILE)
|
288 |
|
289 |
|
290 |
@InstanceCheck(None, INST_UP, RETURN_VALUE)
def TestInstanceAddDiskless(nodes):
  """gnt-instance add -t diskless"""
  assert len(nodes) == 1
  enabled = qa_config.GetEnabledDiskTemplates()
  if constants.DT_DISKLESS not in enabled:
    return None
  return CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_DISKLESS)
|
296 |
|
297 |
|
298 |
@InstanceCheck(None, INST_DOWN, FIRST_ARG)
def TestInstanceRemove(instance):
  """gnt-instance remove"""
  # "-f" skips the interactive confirmation prompt
  cmd = ["gnt-instance", "remove", "-f", instance.name]
  AssertCommand(cmd)
302 |
|
303 |
|
304 |
@InstanceCheck(INST_DOWN, INST_UP, FIRST_ARG)
def TestInstanceStartup(instance):
  """gnt-instance startup"""
  cmd = ["gnt-instance", "startup", instance.name]
  AssertCommand(cmd)
308 |
|
309 |
|
310 |
@InstanceCheck(INST_UP, INST_DOWN, FIRST_ARG)
def TestInstanceShutdown(instance):
  """gnt-instance shutdown"""
  cmd = ["gnt-instance", "shutdown", instance.name]
  AssertCommand(cmd)
314 |
|
315 |
|
316 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceReboot(instance):
  """gnt-instance reboot"""
  name = instance.name
  opts = qa_config.get("options", {})
  # Exercise every configured reboot type (defaults to all known types)
  for rtype in opts.get("reboot-types", constants.REBOOT_TYPES):
    AssertCommand(["gnt-instance", "reboot", "--type=%s" % rtype, name])

  # A plain "reboot" must also start an instance that is shut down
  AssertCommand(["gnt-instance", "shutdown", name])
  qa_utils.RunInstanceCheck(instance, False)
  AssertCommand(["gnt-instance", "reboot", name])

  # The instance must be reported as running afterwards
  master = qa_config.GetMasterNode()
  status_cmd = utils.ShellQuoteArgs(
    ["gnt-instance", "list", "--no-headers", "-o", "status", name])
  status = qa_utils.GetCommandOutput(master.primary, status_cmd)
  AssertEqual(status.strip(), constants.INSTST_RUNNING)
334 |
|
335 |
|
336 |
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceReinstall(instance):
  """gnt-instance reinstall

  Exercises reinstallation from an OS image (remote URL and node-local
  file), from OS scripts, and the expected failures for non-existing
  images and OS definitions.

  @type instance: qa_config.QaInstance
  @param instance: the instance to reinstall; must be shut down

  """
  if instance.disk_template == constants.DT_DISKLESS:
    print qa_utils.FormatInfo("Test not supported for diskless instances")
    return

  # "qa-storage" points at an HTTP location providing test OS images
  qa_storage = qa_config.get("qa-storage")

  if qa_storage is None:
    print qa_utils.FormatInfo("Test not supported because the additional QA"
                              " storage is not available")
  else:
    # Reinstall with OS image from QA storage
    url = "%s/busybox.img" % qa_storage
    AssertCommand(["gnt-instance", "reinstall",
                   "--os-parameters", "os-image=" + url,
                   "-f", instance.name])

    # Reinstall with OS image as local file on the node
    pnode = _GetInstanceField(instance.name, "pnode")

    # Download the image on the primary node and capture its absolute path
    cmd = ("wget -O busybox.img %s &> /dev/null &&"
           " echo $(pwd)/busybox.img") % url
    image = qa_utils.GetCommandOutput(pnode, cmd).strip()

    AssertCommand(["gnt-instance", "reinstall",
                   "--os-parameters", "os-image=" + image,
                   "-f", instance.name])

  # Reinstall from a non-existing local file must fail
  AssertCommand(["gnt-instance", "reinstall",
                 "--os-parameters", "os-image=NonExistantOsForQa",
                 "-f", instance.name], fail=True)

  # Reinstall from a non-existing URL must fail
  AssertCommand(["gnt-instance", "reinstall",
                 "--os-parameters", "os-image=http://NonExistantOsForQa",
                 "-f", instance.name], fail=True)

  # Reinstall using OS scripts
  AssertCommand(["gnt-instance", "reinstall", "-f", instance.name])

  # Test with a non-existent OS definition; must fail
  AssertCommand(["gnt-instance", "reinstall", "-f",
                 "--os-type=NonExistantOsForQa",
                 instance.name],
                fail=True)
|
384 |
|
385 |
|
386 |
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceRenameAndBack(rename_source, rename_target):
  """gnt-instance rename

  This must leave the instance with the original name, not the target
  name.

  @type rename_source: string
  @param rename_source: the instance's current name
  @type rename_target: string
  @param rename_target: the temporary name to rename to and back from

  """
  CheckSsconfInstanceList(rename_source)

  # first do a rename to a different actual name, expecting it to fail
  qa_utils.AddToEtcHosts(["meeeeh-not-exists", rename_target])
  try:
    AssertCommand(["gnt-instance", "rename", rename_source, rename_target],
                  fail=True)
    CheckSsconfInstanceList(rename_source)
  finally:
    # Always undo the /etc/hosts manipulation, even if the assertion failed
    qa_utils.RemoveFromEtcHosts(["meeeeh-not-exists", rename_target])

  info = GetInstanceInfo(rename_source)

  # Check instance volume tags correctly updated. Note that this check is lvm
  # specific, so we skip it for non-lvm-based instances.
  # FIXME: This will need updating when instances will be able to have
  # different disks living on storage pools with heterogeneous storage types.
  # FIXME: This check should be put inside the disk/storage class themselves,
  # rather than explicitly called here.
  if info["storage-type"] == constants.ST_LVM_VG:
    # In the lvm world we can check for tags on the logical volume
    tags_cmd = ("lvs -o tags --noheadings %s | grep " %
                (" ".join(info["volumes"]), ))
  else:
    # Other storage types don't have tags, so we use an always failing command,
    # to make sure it never gets executed
    tags_cmd = "false"

  # and now rename instance to rename_target...
  AssertCommand(["gnt-instance", "rename", rename_source, rename_target])
  CheckSsconfInstanceList(rename_target)
  qa_utils.RunInstanceCheck(rename_source, False)
  qa_utils.RunInstanceCheck(rename_target, False)

  # NOTE: tags might not be the exactly as the instance name, due to
  # charset restrictions; hence the test might be flaky
  if (rename_source != rename_target and
      info["storage-type"] == constants.ST_LVM_VG):
    for node in info["nodes"]:
      # After renaming, only the target name should appear in the LV tags
      AssertCommand(tags_cmd + rename_source, node=node, fail=True)
      AssertCommand(tags_cmd + rename_target, node=node, fail=False)

  # and back
  AssertCommand(["gnt-instance", "rename", rename_target, rename_source])
  CheckSsconfInstanceList(rename_source)
  qa_utils.RunInstanceCheck(rename_target, False)

  if (rename_source != rename_target and
      info["storage-type"] == constants.ST_LVM_VG):
    for node in info["nodes"]:
      # After renaming back, only the source name should appear in the tags
      AssertCommand(tags_cmd + rename_source, node=node, fail=False)
      AssertCommand(tags_cmd + rename_target, node=node, fail=True)
|
446 |
|
447 |
|
448 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
|
449 |
def TestInstanceFailover(instance): |
450 |
"""gnt-instance failover"""
|
451 |
if not IsFailoverSupported(instance): |
452 |
print qa_utils.FormatInfo("Instance doesn't support failover, skipping" |
453 |
" test")
|
454 |
return
|
455 |
|
456 |
cmd = ["gnt-instance", "failover", "--force", instance.name] |
457 |
|
458 |
# failover ...
|
459 |
AssertCommand(cmd) |
460 |
qa_utils.RunInstanceCheck(instance, True)
|
461 |
|
462 |
# ... and back
|
463 |
AssertCommand(cmd) |
464 |
|
465 |
|
466 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceMigrate(instance, toggle_always_failover=True):
  """gnt-instance migrate

  @type instance: qa_config.QaInstance
  @param instance: the instance to migrate; must be running
  @type toggle_always_failover: bool
  @param toggle_always_failover: whether to flip the always_failover
      backend parameter before migrating back (and restore it afterwards)

  """
  if not IsMigrationSupported(instance):
    print qa_utils.FormatInfo("Instance doesn't support migration, skipping"
                              " test")
    return

  cmd = ["gnt-instance", "migrate", "--force", instance.name]
  af_par = constants.BE_ALWAYS_FAILOVER
  af_field = "be/" + constants.BE_ALWAYS_FAILOVER
  # Remember the initial always_failover value so it can be restored
  af_init_val = _GetBoolInstanceField(instance.name, af_field)

  # migrate ...
  AssertCommand(cmd)
  # TODO: Verify the choice between failover and migration
  qa_utils.RunInstanceCheck(instance, True)

  # ... and back (possibly with always_failover toggled)
  if toggle_always_failover:
    AssertCommand(["gnt-instance", "modify", "-B",
                   ("%s=%s" % (af_par, not af_init_val)),
                   instance.name])
  AssertCommand(cmd)
  # TODO: Verify the choice between failover and migration
  qa_utils.RunInstanceCheck(instance, True)
  if toggle_always_failover:
    # Restore the original always_failover setting
    AssertCommand(["gnt-instance", "modify", "-B",
                   ("%s=%s" % (af_par, af_init_val)), instance.name])

  # TODO: Split into multiple tests
  # Migrating a stopped instance without --allow-failover must fail
  AssertCommand(["gnt-instance", "shutdown", instance.name])
  qa_utils.RunInstanceCheck(instance, False)
  AssertCommand(cmd, fail=True)
  AssertCommand(["gnt-instance", "migrate", "--force", "--allow-failover",
                 instance.name])
  AssertCommand(["gnt-instance", "start", instance.name])
  AssertCommand(cmd)
  # @InstanceCheck enforces the check that the instance is running
  qa_utils.RunInstanceCheck(instance, True)

  # With always_failover set, "migrate" should fall back to failover
  AssertCommand(["gnt-instance", "modify", "-B",
                 ("%s=%s" %
                  (constants.BE_ALWAYS_FAILOVER, constants.VALUE_TRUE)),
                 instance.name])

  AssertCommand(cmd)
  qa_utils.RunInstanceCheck(instance, True)
  # TODO: Verify that a failover has been done instead of a migration

  # TODO: Verify whether the default value is restored here (not hardcoded)
  AssertCommand(["gnt-instance", "modify", "-B",
                 ("%s=%s" %
                  (constants.BE_ALWAYS_FAILOVER, constants.VALUE_FALSE)),
                 instance.name])

  AssertCommand(cmd)
  qa_utils.RunInstanceCheck(instance, True)
|
524 |
|
525 |
|
526 |
def TestInstanceInfo(instance):
  """gnt-instance info"""
  cmd = ["gnt-instance", "info", instance.name]
  AssertCommand(cmd)
529 |
|
530 |
|
531 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceModify(instance):
  """gnt-instance modify

  Applies a series of backend/hypervisor parameter modifications (plus
  hypervisor-specific and hotplug variants), then checks that an empty
  modify fails and that offline/online marking behaves as expected on a
  running instance.

  @type instance: qa_config.QaInstance
  @param instance: the instance to modify; must be running

  """
  default_hv = qa_config.GetDefaultHypervisor()

  # Assume /sbin/init exists on all systems
  test_kernel = "/sbin/init"
  test_initrd = test_kernel

  # Remember original memory settings so they can be restored below
  orig_maxmem = qa_config.get(constants.BE_MAXMEM)
  orig_minmem = qa_config.get(constants.BE_MINMEM)
  #orig_bridge = qa_config.get("bridge", "xen-br0")

  # Each entry is one argument list for a separate "gnt-instance modify" run
  args = [
    ["-B", "%s=128" % constants.BE_MINMEM],
    ["-B", "%s=128" % constants.BE_MAXMEM],
    ["-B", "%s=%s,%s=%s" % (constants.BE_MINMEM, orig_minmem,
                            constants.BE_MAXMEM, orig_maxmem)],
    ["-B", "%s=2" % constants.BE_VCPUS],
    ["-B", "%s=1" % constants.BE_VCPUS],
    ["-B", "%s=%s" % (constants.BE_VCPUS, constants.VALUE_DEFAULT)],
    ["-B", "%s=%s" % (constants.BE_ALWAYS_FAILOVER, constants.VALUE_TRUE)],
    ["-B", "%s=%s" % (constants.BE_ALWAYS_FAILOVER, constants.VALUE_DEFAULT)],

    ["-H", "%s=%s" % (constants.HV_KERNEL_PATH, test_kernel)],
    ["-H", "%s=%s" % (constants.HV_KERNEL_PATH, constants.VALUE_DEFAULT)],

    # TODO: bridge tests
    #["--bridge", "xen-br1"],
    #["--bridge", orig_bridge],
    ]

  if default_hv == constants.HT_XEN_PVM:
    args.extend([
      ["-H", "%s=%s" % (constants.HV_INITRD_PATH, test_initrd)],
      # "no_<param>" unsets the parameter
      ["-H", "no_%s" % (constants.HV_INITRD_PATH, )],
      ["-H", "%s=%s" % (constants.HV_INITRD_PATH, constants.VALUE_DEFAULT)],
      ])
  elif default_hv == constants.HT_XEN_HVM:
    args.extend([
      ["-H", "%s=acn" % constants.HV_BOOT_ORDER],
      ["-H", "%s=%s" % (constants.HV_BOOT_ORDER, constants.VALUE_DEFAULT)],
      ])
  elif default_hv == constants.HT_KVM and \
    qa_config.TestEnabled("instance-device-hotplug"):
    # Device hotplug is only exercised for KVM when explicitly enabled
    args.extend([
      ["--net", "-1:add", "--hotplug"],
      ["--net", "-1:modify,mac=aa:bb:cc:dd:ee:ff", "--hotplug", "--force"],
      ["--net", "-1:remove", "--hotplug"],
      ["--disk", "-1:add,size=1G", "--hotplug"],
      ["--disk", "-1:remove", "--hotplug"],
      ])

  url = "http://example.com/busybox.img"
  args.extend([
    ["--os-parameters", "os-image=" + url],
    ["--os-parameters", "os-image=default"]
    ])

  for alist in args:
    AssertCommand(["gnt-instance", "modify"] + alist + [instance.name])

  # check no-modify
  AssertCommand(["gnt-instance", "modify", instance.name], fail=True)

  # Marking offline while instance is running must fail...
  AssertCommand(["gnt-instance", "modify", "--offline", instance.name],
                fail=True)

  # ...while making it online is ok, and should work
  AssertCommand(["gnt-instance", "modify", "--online", instance.name])
602 |
|
603 |
|
604 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceModifyPrimaryAndBack(instance, currentnode, othernode):
  """gnt-instance modify --new-primary

  This will leave the instance on its original primary node, not other node.

  @type instance: qa_config.QaInstance
  @param instance: the instance; only file-based instances are supported
  @type currentnode: qa_config.QaNode
  @param currentnode: the instance's current primary node
  @type othernode: qa_config.QaNode
  @param othernode: the node to temporarily move the instance to

  """
  if instance.disk_template != constants.DT_FILE:
    print qa_utils.FormatInfo("Test only supported for the file disk template")
    return

  cluster_name = qa_config.get("name")

  name = instance.name
  current = currentnode.primary
  other = othernode.primary

  filestorage = qa_config.get("file-storage-dir",
                              pathutils.DEFAULT_FILE_STORAGE_DIR)
  # Per-instance directory holding the file-based disks
  disk = os.path.join(filestorage, name)

  # Changing the primary of a running instance must fail
  AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % other, name],
                fail=True)
  AssertCommand(["gnt-instance", "shutdown", name])
  # Copy the disk directory to the new primary before switching; the
  # HostKeyAlias matches the cluster name used in the known-hosts file
  AssertCommand(["scp", "-oGlobalKnownHostsFile=%s" %
                 pathutils.SSH_KNOWN_HOSTS_FILE,
                 "-oCheckHostIp=no", "-oStrictHostKeyChecking=yes",
                 "-oHashKnownHosts=no", "-oHostKeyAlias=%s" % cluster_name,
                 "-r", disk, "%s:%s" % (other, filestorage)], node=current)
  AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % other, name])
  AssertCommand(["gnt-instance", "startup", name])

  # and back
  AssertCommand(["gnt-instance", "shutdown", name])
  # Remove the stale copy on the temporary primary before moving back
  AssertCommand(["rm", "-rf", disk], node=other)
  AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % current, name])
  AssertCommand(["gnt-instance", "startup", name])
641 |
|
642 |
|
643 |
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceStoppedModify(instance):
  """gnt-instance modify (stopped instance)"""
  name = instance.name

  def _SetState(flag, **kwargs):
    # Toggle the instance's offline/online admin state
    AssertCommand(["gnt-instance", "modify", flag, name], **kwargs)

  # Instance was not marked offline; try marking it online once more
  _SetState("--online")

  # Mark instance as offline
  _SetState("--offline")

  # When the instance is offline shutdown should only work with --force,
  # while start should never work
  AssertCommand(["gnt-instance", "shutdown", name], fail=True)
  AssertCommand(["gnt-instance", "shutdown", "--force", name])
  AssertCommand(["gnt-instance", "start", name], fail=True)
  AssertCommand(["gnt-instance", "start", "--force", name], fail=True)

  # Also do offline to offline
  _SetState("--offline")

  # And online again
  _SetState("--online")
666 |
|
667 |
|
668 |
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
|
669 |
def TestInstanceConvertDiskToPlain(instance, inodes): |
670 |
"""gnt-instance modify -t"""
|
671 |
name = instance.name |
672 |
|
673 |
template = instance.disk_template |
674 |
if template != constants.DT_DRBD8:
|
675 |
print qa_utils.FormatInfo("Unsupported template %s, skipping conversion" |
676 |
" test" % template)
|
677 |
return
|
678 |
|
679 |
assert len(inodes) == 2 |
680 |
AssertCommand(["gnt-instance", "modify", "-t", constants.DT_PLAIN, name]) |
681 |
AssertCommand(["gnt-instance", "modify", "-t", constants.DT_DRBD8, |
682 |
"-n", inodes[1].primary, name]) |
683 |
|
684 |
|
685 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
|
686 |
def TestInstanceModifyDisks(instance): |
687 |
"""gnt-instance modify --disk"""
|
688 |
if not IsDiskSupported(instance): |
689 |
print qa_utils.FormatInfo("Instance doesn't support disks, skipping test") |
690 |
return
|
691 |
|
692 |
disk_conf = qa_config.GetDiskOptions()[-1]
|
693 |
size = disk_conf.get("size")
|
694 |
name = instance.name |
695 |
build_cmd = lambda arg: ["gnt-instance", "modify", "--disk", arg, name] |
696 |
if qa_config.AreSpindlesSupported():
|
697 |
spindles = disk_conf.get("spindles")
|
698 |
spindles_supported = True
|
699 |
else:
|
700 |
# Any number is good for spindles in this case
|
701 |
spindles = 1
|
702 |
spindles_supported = False
|
703 |
AssertCommand(build_cmd("add:size=%s,spindles=%s" % (size, spindles)),
|
704 |
fail=not spindles_supported)
|
705 |
AssertCommand(build_cmd("add:size=%s" % size),
|
706 |
fail=spindles_supported) |
707 |
# Exactly one of the above commands has succeded, so we need one remove
|
708 |
AssertCommand(build_cmd("remove"))
|
709 |
|
710 |
|
711 |
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
|
712 |
def TestInstanceGrowDisk(instance): |
713 |
"""gnt-instance grow-disk"""
|
714 |
if instance.disk_template == constants.DT_DISKLESS:
|
715 |
print qa_utils.FormatInfo("Test not supported for diskless instances") |
716 |
return
|
717 |
|
718 |
name = instance.name |
719 |
disks = qa_config.GetDiskOptions() |
720 |
all_size = [d.get("size") for d in disks] |
721 |
all_grow = [d.get("growth") for d in disks] |
722 |
|
723 |
if not all_grow: |
724 |
# missing disk sizes but instance grow disk has been enabled,
|
725 |
# let's set fixed/nomimal growth
|
726 |
all_grow = ["128M" for _ in all_size] |
727 |
|
728 |
for idx, (size, grow) in enumerate(zip(all_size, all_grow)): |
729 |
# succeed in grow by amount
|
730 |
AssertCommand(["gnt-instance", "grow-disk", name, str(idx), grow]) |
731 |
# fail in grow to the old size
|
732 |
AssertCommand(["gnt-instance", "grow-disk", "--absolute", name, str(idx), |
733 |
size], fail=True)
|
734 |
# succeed to grow to old size + 2 * growth
|
735 |
int_size = utils.ParseUnit(size) |
736 |
int_grow = utils.ParseUnit(grow) |
737 |
AssertCommand(["gnt-instance", "grow-disk", "--absolute", name, str(idx), |
738 |
str(int_size + 2 * int_grow)]) |
739 |
|
740 |
|
741 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceDeviceNames(instance):
  """gnt-instance modify with named devices.

  Checks that disks and NICs can be added, addressed and removed by
  name, that duplicate and invalid names are rejected, and that disks
  can be renamed both by index and by name.

  @type instance: qa_config.QaInstance
  @param instance: the instance to operate on; must be running

  """
  if instance.disk_template == constants.DT_DISKLESS:
    print qa_utils.FormatInfo("Test not supported for diskless instances")
    return

  name = instance.name
  for dev_type in ["disk", "net"]:
    if dev_type == "disk":
      # Disks need a size (and spindles, where supported); NICs need nothing
      options = ",size=512M"
      if qa_config.AreSpindlesSupported():
        options += ",spindles=1"
    else:
      options = ""
    # succeed in adding a device named 'test_device'
    AssertCommand(["gnt-instance", "modify",
                   "--%s=-1:add,name=test_device%s" % (dev_type, options),
                   name])
    # succeed in removing the 'test_device'
    AssertCommand(["gnt-instance", "modify",
                   "--%s=test_device:remove" % dev_type,
                   name])
    # fail to add two devices with the same name
    AssertCommand(["gnt-instance", "modify",
                   "--%s=-1:add,name=test_device%s" % (dev_type, options),
                   "--%s=-1:add,name=test_device%s" % (dev_type, options),
                   name], fail=True)
    # fail to add a device with invalid name (names must not be numeric)
    AssertCommand(["gnt-instance", "modify",
                   "--%s=-1:add,name=2%s" % (dev_type, options),
                   name], fail=True)
  # Rename disks
  disks = qa_config.GetDiskOptions()
  disk_names = [d.get("name") for d in disks]
  for idx, disk_name in enumerate(disk_names):
    # Refer to disk by idx
    AssertCommand(["gnt-instance", "modify",
                   "--disk=%s:modify,name=renamed" % idx,
                   name])
    # Refer to by name and rename to original name
    AssertCommand(["gnt-instance", "modify",
                   "--disk=renamed:modify,name=%s" % disk_name,
                   name])
  if len(disks) >= 2:
    # fail in renaming two disks to the same name
    AssertCommand(["gnt-instance", "modify",
                   "--disk=0:modify,name=same_name",
                   "--disk=1:modify,name=same_name",
                   name], fail=True)
|
790 |
|
791 |
|
792 |
def TestInstanceList():
  """gnt-instance list"""
  fields = query.INSTANCE_FIELDS.keys()
  qa_utils.GenericQueryTest("gnt-instance", fields)
|
795 |
|
796 |
|
797 |
def TestInstanceListFields():
  """gnt-instance list-fields"""
  fields = query.INSTANCE_FIELDS.keys()
  qa_utils.GenericQueryFieldsTest("gnt-instance", fields)
|
800 |
|
801 |
|
802 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceConsole(instance):
  """gnt-instance console"""
  # Only print the console command; do not attach to the console itself
  cmd = ["gnt-instance", "console", "--show-cmd", instance.name]
  AssertCommand(cmd)
806 |
|
807 |
|
808 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestReplaceDisks(instance, curr_nodes, other_nodes):
  """gnt-instance replace-disks

  Runs replace-disks on the primary, the secondary, a new secondary
  (iallocator-chosen or explicit) and back, then checks "-a" behaviour
  with the instance stopped and its disks (de)activated.

  @type instance: qa_config.QaInstance
  @param instance: the instance; must support disk replacing (DRBD)
  @type curr_nodes: list
  @param curr_nodes: the instance's current [primary, secondary] nodes
  @type other_nodes: list
  @param other_nodes: exactly one spare node to use as a new secondary

  """
  def buildcmd(args):
    # Assemble "gnt-instance replace-disks <args> <instance>"
    cmd = ["gnt-instance", "replace-disks"]
    cmd.extend(args)
    cmd.append(instance.name)
    return cmd

  if not IsDiskReplacingSupported(instance):
    print qa_utils.FormatInfo("Instance doesn't support disk replacing,"
                              " skipping test")
    return

  # Currently all supported templates have one primary and one secondary node
  assert len(curr_nodes) == 2
  snode = curr_nodes[1]
  assert len(other_nodes) == 1
  othernode = other_nodes[0]

  options = qa_config.get("options", {})
  use_ialloc = options.get("use-iallocators", True)
  for data in [
    ["-p"],
    ["-s"],
    # A placeholder; the actual command choice depends on use_ialloc
    None,
    # Restore the original secondary
    ["--new-secondary=%s" % snode.primary],
    ]:
    if data is None:
      if use_ialloc:
        data = ["-I", constants.DEFAULT_IALLOCATOR_SHORTCUT]
      else:
        data = ["--new-secondary=%s" % othernode.primary]
    AssertCommand(buildcmd(data))

  AssertCommand(buildcmd(["-a"]))
  # "-a" must fail while the instance is stopped with inactive disks ...
  AssertCommand(["gnt-instance", "stop", instance.name])
  AssertCommand(buildcmd(["-a"]), fail=True)
  # ... and succeed again once the disks are activated
  AssertCommand(["gnt-instance", "activate-disks", instance.name])
  AssertCommand(["gnt-instance", "activate-disks", "--wait-for-sync",
                 instance.name])
  AssertCommand(buildcmd(["-a"]))
  AssertCommand(["gnt-instance", "start", instance.name])
853 |
|
854 |
|
855 |
def _AssertRecreateDisks(cmdargs, instance, fail=False, check=True,
                         destroy=True):
  """Execute gnt-instance recreate-disks and check the result

  @param cmdargs: Arguments (instance name excluded)
  @param instance: Instance to operate on
  @param fail: True if the command is expected to fail
  @param check: If True and fail is False, check that the disks work
  @param destroy: If True, destroy the old disks first

  """
  if destroy:
    _DestroyInstanceDisks(instance)
  # Pass "fail" by keyword for clarity (it was positional before)
  AssertCommand((["gnt-instance", "recreate-disks"] + cmdargs +
                 [instance.name]), fail=fail)
  if not fail and check:
    # Quick check that the disks are there
    AssertCommand(["gnt-instance", "activate-disks", instance.name])
    AssertCommand(["gnt-instance", "activate-disks", "--wait-for-sync",
                   instance.name])
    AssertCommand(["gnt-instance", "deactivate-disks", instance.name])
876 |
|
877 |
|
878 |
def _BuildRecreateDisksOpts(en_disks, with_spindles, with_growth, |
879 |
spindles_supported): |
880 |
if with_spindles:
|
881 |
if spindles_supported:
|
882 |
if with_growth:
|
883 |
build_spindles_opt = (lambda disk:
|
884 |
",spindles=%s" %
|
885 |
(disk["spindles"] + disk["spindles-growth"])) |
886 |
else:
|
887 |
build_spindles_opt = (lambda disk:
|
888 |
",spindles=%s" % disk["spindles"]) |
889 |
else:
|
890 |
build_spindles_opt = (lambda _: ",spindles=1") |
891 |
else:
|
892 |
build_spindles_opt = (lambda _: "") |
893 |
if with_growth:
|
894 |
build_size_opt = (lambda disk:
|
895 |
"size=%s" % (utils.ParseUnit(disk["size"]) + |
896 |
utils.ParseUnit(disk["growth"])))
|
897 |
else:
|
898 |
build_size_opt = (lambda disk: "size=%s" % disk["size"]) |
899 |
build_disk_opt = (lambda (idx, disk):
|
900 |
"--disk=%s:%s%s" % (idx, build_size_opt(disk),
|
901 |
build_spindles_opt(disk))) |
902 |
return map(build_disk_opt, en_disks) |
903 |
|
904 |
|
905 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestRecreateDisks(instance, inodes, othernodes):
  """gnt-instance recreate-disks

  @param instance: Instance to work on
  @param inodes: List of the current nodes of the instance
  @param othernodes: list/tuple of nodes where to temporarily recreate disks

  """
  use_ialloc = qa_config.get("options", {}).get("use-iallocators", True)
  tmp_seq = ":".join(node.primary for node in othernodes)
  home_seq = ":".join(node.primary for node in inodes)

  # While the instance is running, recreation must be refused
  _AssertRecreateDisks(["-n", tmp_seq], instance, fail=True, destroy=False)
  if use_ialloc:
    _AssertRecreateDisks(["-I", "hail"], instance, fail=True, destroy=False)
  else:
    _AssertRecreateDisks(["-n", tmp_seq], instance, fail=True, destroy=False)

  AssertCommand(["gnt-instance", "stop", instance.name])
  # The disks still exist, so a plain recreation must fail as well
  _AssertRecreateDisks([], instance, fail=True, destroy=False)
  # Passing spindles on a cluster without spindle support must fail
  if not qa_config.AreSpindlesSupported():
    _AssertRecreateDisks(["--disk=0:spindles=2"], instance,
                         fail=True, destroy=False)
  # Recreate the disks in place
  _AssertRecreateDisks([], instance)
  # Recreate them on the temporary nodes
  if use_ialloc:
    _AssertRecreateDisks(["-I", "hail"], instance)
    # ... and somewhere else again, via the default iallocator
    _AssertRecreateDisks(["-I", constants.DEFAULT_IALLOCATOR_SHORTCUT],
                         instance)
  else:
    _AssertRecreateDisks(["-n", tmp_seq], instance)
  # Bring the disks back to the original nodes
  _AssertRecreateDisks(["-n", home_seq], instance)

  # Recreate resized disks.  Exactly one of the two invocations must fail,
  # because spindles are either given when they should not be, or vice versa.
  all_disks = qa_config.GetDiskOptions()
  spindles_supported = qa_config.AreSpindlesSupported()
  _AssertRecreateDisks(
    _BuildRecreateDisksOpts(enumerate(all_disks), True, True,
                            spindles_supported),
    instance, destroy=True, fail=not spindles_supported)
  _AssertRecreateDisks(
    _BuildRecreateDisksOpts(enumerate(all_disks), False, True,
                            spindles_supported),
    instance, destroy=False, fail=spindles_supported)

  # Recreate the disks one by one (with the original size)
  for (idx, disk) in enumerate(all_disks):
    # Destroy all the disks only on the first iteration
    first = (idx == 0)
    # Again, exactly one of the two commands is expected to fail
    _AssertRecreateDisks(
      _BuildRecreateDisksOpts([(idx, disk)], True, False, spindles_supported),
      instance, destroy=first, check=False, fail=not spindles_supported)
    _AssertRecreateDisks(
      _BuildRecreateDisksOpts([(idx, disk)], False, False, spindles_supported),
      instance, destroy=False, check=False, fail=spindles_supported)

  # Together with the InstanceCheck decoration, these verify that the
  # recreated disks are actually usable
  AssertCommand(["gnt-instance", "reinstall", "-f", instance.name])
  AssertCommand(["gnt-instance", "start", instance.name])
972 |
|
973 |
|
974 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceExport(instance, node):
  """gnt-backup export -n ..."""
  # Export does not work for file-based templates, thus we skip the test
  if instance.disk_template in [constants.DT_FILE, constants.DT_SHARED_FILE]:
    return None
  name = instance.name
  AssertCommand(["gnt-backup", "export", "-n", node.primary, name])
  return qa_utils.ResolveInstanceName(name)
|
983 |
|
984 |
|
985 |
@InstanceCheck(None, INST_DOWN, FIRST_ARG)
def TestInstanceExportWithRemove(instance, node):
  """gnt-backup export --remove-instance"""
  # Export to the given node and remove the instance in the same operation
  cmd = ["gnt-backup", "export", "-n", node.primary, "--remove-instance",
         instance.name]
  AssertCommand(cmd)
|
990 |
|
991 |
|
992 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceExportNoTarget(instance):
  """gnt-backup export (without target node, should fail)"""
  # No "-n" option: the command must be rejected
  AssertCommand(["gnt-backup", "export", instance.name], fail=True)
996 |
|
997 |
|
998 |
@InstanceCheck(None, INST_DOWN, FIRST_ARG)
def TestInstanceImport(newinst, node, expnode, name):
  """gnt-backup import"""
  templ = constants.DT_PLAIN
  if not qa_config.IsTemplateSupported(templ):
    return
  cmd = ["gnt-backup", "import",
         "--disk-template=%s" % templ,
         "--no-ip-check",
         "--src-node=%s" % expnode.primary,
         "--src-dir=%s/%s" % (pathutils.EXPORT_DIR, name),
         "--node=%s" % node.primary]
  # Generate a fresh MAC, as the exported instance may still exist
  cmd.extend(GetGenericAddParameters(newinst, templ,
                                     force_mac=constants.VALUE_GENERATE))
  cmd.append(newinst.name)
  AssertCommand(cmd)
  newinst.SetDiskTemplate(templ)
1015 |
|
1016 |
|
1017 |
def TestBackupList(expnode):
  """gnt-backup list"""
  AssertCommand(["gnt-backup", "list", "--node=%s" % expnode.primary])

  # Exports have no usable name field, hence namefield=None
  qa_utils.GenericQueryTest("gnt-backup", query.EXPORT_FIELDS.keys(),
                            namefield=None, test_unknown=False)
1023 |
|
1024 |
|
1025 |
def TestBackupListFields():
  """gnt-backup list-fields"""
  # Verify that every known export field is advertised by list-fields
  export_fields = query.EXPORT_FIELDS.keys()
  qa_utils.GenericQueryFieldsTest("gnt-backup", export_fields)
|
1028 |
|
1029 |
|
1030 |
def TestRemoveInstanceOfflineNode(instance, snode, set_offline, set_online):
  """gnt-instance remove with an off-line node

  @param instance: instance
  @param snode: secondary node, to be set offline
  @param set_offline: function to call to set the node off-line
  @param set_online: function to call to set the node on-line

  """
  # Collect the disk information before the instance goes away
  info = GetInstanceInfo(instance.name)
  set_offline(snode)
  try:
    TestInstanceRemove(instance)
  finally:
    set_online(snode)

  # Clean up the disks on the offline node, if necessary
  if instance.disk_template in constants.DTS_EXT_MIRROR:
    return
  # FIXME: abstract the cleanup inside the disks
  storage_type = info["storage-type"]
  if storage_type == constants.ST_LVM_VG:
    for minor in info["drbd-minors"][snode.primary]:
      # DRBD 8.3 syntax comes first, then DRBD 8.4 syntax.  The 8.4 syntax
      # relies on the fact that a resource is always created for each minor,
      # named resource{minor}.  As 'drbdsetup 0 down' returns success (even
      # though that's invalid syntax), both commands are always run and the
      # output ignored.
      drbd_shutdown_cmd = ("(drbdsetup %d down >/dev/null 2>&1;"
                           " drbdsetup down resource%d >/dev/null 2>&1)"
                           " || /bin/true" % (minor, minor))
      AssertCommand(drbd_shutdown_cmd, node=snode)
    AssertCommand(["lvremove", "-f"] + info["volumes"], node=snode)
  elif storage_type == constants.ST_FILE:
    filestorage = qa_config.get("file-storage-dir",
                                pathutils.DEFAULT_FILE_STORAGE_DIR)
    disk = os.path.join(filestorage, instance.name)
    AssertCommand(["rm", "-rf", disk], node=snode)
1068 |
|
1069 |
|
1070 |
def TestInstanceCreationRestrictedByDiskTemplates():
  """Test adding instances for disabled disk templates."""
  if qa_config.TestEnabled("cluster-exclusive-storage"):
    # These tests are valid only for non-exclusive storage
    return

  enabled_disk_templates = qa_config.GetEnabledDiskTemplates()
  # NOTE(review): the acquired nodes are never released here — presumably
  # intentional for the QA run's lifetime; confirm against other tests.
  nodes = qa_config.AcquireManyNodes(2)

  # Setup the cluster with the enabled_disk_templates
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-templates=%s" % ",".join(enabled_disk_templates),
     "--ipolicy-disk-templates=%s" % ",".join(enabled_disk_templates)],
    fail=False)

  # Test instance creation for enabled disk templates
  for disk_template in enabled_disk_templates:
    instance = CreateInstanceByDiskTemplate(nodes, disk_template, fail=False)
    TestInstanceRemove(instance)
    instance.Release()

  # Test that instance creation fails for disabled disk templates
  disabled_disk_templates = list(constants.DISK_TEMPLATES
                                 - set(enabled_disk_templates))
  for disk_template in disabled_disk_templates:
    CreateInstanceByDiskTemplate(nodes, disk_template, fail=True)

  # Test instance creation after disabling enabled disk templates
  if len(enabled_disk_templates) > 1:
    # Partition the disk templates, enable them separately and check if the
    # disabled ones cannot be used by instances.
    # Floor division ("//") keeps this an int under Python 3 as well;
    # on Python 2 ints it is identical to "/".
    middle = len(enabled_disk_templates) // 2
    templates1 = enabled_disk_templates[:middle]
    templates2 = enabled_disk_templates[middle:]

    for (enabled, disabled) in [(templates1, templates2),
                                (templates2, templates1)]:
      AssertCommand(["gnt-cluster", "modify",
                     "--enabled-disk-templates=%s" % ",".join(enabled),
                     "--ipolicy-disk-templates=%s" % ",".join(enabled)],
                    fail=False)
      for disk_template in disabled:
        CreateInstanceByDiskTemplate(nodes, disk_template, fail=True)
  elif len(enabled_disk_templates) == 1:
    # If only one disk template is enabled in the QA config, we have to enable
    # some other templates in order to test if disabling the only enabled
    # disk template prohibits creating instances of that template.
    other_disk_templates = list(
      set([constants.DT_DISKLESS, constants.DT_BLOCK]) -
      set(enabled_disk_templates))
    AssertCommand(["gnt-cluster", "modify",
                   "--enabled-disk-templates=%s" %
                   ",".join(other_disk_templates),
                   "--ipolicy-disk-templates=%s" %
                   ",".join(other_disk_templates)],
                  fail=False)
    CreateInstanceByDiskTemplate(nodes, enabled_disk_templates[0], fail=True)
  else:
    raise qa_error.Error("Please enable at least one disk template"
                         " in your QA setup.")

  # Restore initially enabled disk templates
  AssertCommand(["gnt-cluster", "modify",
                 "--enabled-disk-templates=%s" %
                 ",".join(enabled_disk_templates),
                 "--ipolicy-disk-templates=%s" %
                 ",".join(enabled_disk_templates)],
                fail=False)
|
1139 |
|
1140 |
|
1141 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def _TestInstanceUserDown(instance, master, hv_shutdown_fn):
  """Check the instance status transitions around a user-initiated shutdown,
  using C{hv_shutdown_fn} to stop the VM at the hypervisor level.

  """
  def _AssertStatus(expected):
    # Read the instance status via "gnt-instance list" run on the master
    cmd = ["gnt-instance", "list", "--no-headers", "-o", "status",
           instance.name]
    out = qa_utils.GetCommandOutput(master.primary, utils.ShellQuoteArgs(cmd))
    AssertEqual(out.strip(), expected)

  # Shutdown instance and bring instance status to 'USER_down'
  hv_shutdown_fn()
  _AssertStatus(constants.INSTST_USERDOWN)

  # Fail to bring instance status to 'running'
  AssertCommand(["gnt-instance", "start", instance.name], fail=True)
  _AssertStatus(constants.INSTST_USERDOWN)

  # Bring instance status to 'ADMIN_down'
  AssertCommand(["gnt-instance", "shutdown", instance.name])
  _AssertStatus(constants.INSTST_ADMINDOWN)

  # Bring instance status to 'running'
  AssertCommand(["gnt-instance", "start", instance.name])
  _AssertStatus(constants.INSTST_RUNNING)

  # Bring instance status to 'ADMIN_down' forcibly
  AssertCommand(["gnt-instance", "shutdown", "-f", instance.name])
  _AssertStatus(constants.INSTST_ADMINDOWN)

  # Bring instance status to 'running'
  AssertCommand(["gnt-instance", "start", instance.name])
  _AssertStatus(constants.INSTST_RUNNING)
1190 |
|
1191 |
|
1192 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def _TestInstanceUserDownXen(instance, master):
  """User-shutdown test for Xen: stop the domain with "xm shutdown"."""
  primary = _GetInstanceField(instance.name, "pnode")

  def _ShutdownViaXen():
    # "-w" makes xm wait until the shutdown has completed
    AssertCommand(["xm", "shutdown", "-w", instance.name], node=primary)

  _TestInstanceUserDown(instance, master, _ShutdownViaXen)
1198 |
|
1199 |
|
1200 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def _TestInstanceUserDownKvm(instance, master):
  """User-shutdown test for KVM: kill the KVM process directly."""
  AssertCommand(["gnt-instance", "modify", "-H", "user_shutdown=true",
                 instance.name])

  # The instance needs to reboot not because the 'user_shutdown'
  # parameter was modified but because the KVM daemon need to be
  # started, given that the instance was first created with user
  # shutdown disabled.
  AssertCommand(["gnt-instance", "reboot", instance.name])

  primary = _GetInstanceField(instance.name, "pnode")

  def _StopKVMInstance():
    # Kill the KVM process on the primary node, then give the status
    # change some time to propagate
    AssertCommand("pkill -f \"\\-name %s\"" % instance.name, node=primary)
    time.sleep(10)

  _TestInstanceUserDown(instance, master, _StopKVMInstance)
|
1218 |
|
1219 |
def TestInstanceUserDown(instance, master): |
1220 |
"""Tests user shutdown"""
|
1221 |
enabled_hypervisors = qa_config.GetEnabledHypervisors() |
1222 |
|
1223 |
for (hv, fn) in [(constants.HT_XEN_PVM, _TestInstanceUserDownXen), |
1224 |
(constants.HT_XEN_HVM, _TestInstanceUserDownXen), |
1225 |
(constants.HT_KVM, _TestInstanceUserDownKvm)]: |
1226 |
if hv in enabled_hypervisors: |
1227 |
qa_daemon.TestPauseWatcher() |
1228 |
fn(instance, master) |
1229 |
qa_daemon.TestResumeWatcher() |
1230 |
else:
|
1231 |
print "%s hypervisor is not enabled, skipping test for this hypervisor" \ |
1232 |
% hv |
1233 |
|
1234 |
|
1235 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceCommunication(instance, master):
  """Tests instance communication via 'gnt-instance modify'"""

  # Enable instance communication network at the cluster level
  network_name = "mynetwork"

  cmd = ["gnt-cluster", "modify",
         "--instance-communication-network=%s" % network_name]
  result_output = qa_utils.GetCommandOutput(master.primary,
                                            utils.ShellQuoteArgs(cmd))
  print(result_output)

  # Enable instance communication mechanism for this instance
  AssertCommand(["gnt-instance", "modify", "-c", "yes", instance.name])

  # Reboot instance for changes to NIC to take effect
  AssertCommand(["gnt-instance", "reboot", instance.name])

  # Check if the instance is properly configured for instance
  # communication.
  nic_name = "%s%s" % (constants.INSTANCE_COMMUNICATION_NIC_PREFIX,
                       instance.name)

  ## Check the output of 'gnt-instance list'
  # The field is rendered like "['name1', 'name2']"; strip the brackets and
  # quoting to get the plain names.  A list comprehension (instead of the
  # previous map/lambda) keeps .index() working under Python 3 too.
  nic_names = _GetInstanceField(instance.name, "nic.names")
  nic_names = [x.strip(" '") for x in nic_names.strip("[]").split(",")]

  AssertIn(nic_name, nic_names,
           msg="Looking for instance communication TAP interface")

  nic_n = nic_names.index(nic_name)

  nic_ip = _GetInstanceField(instance.name, "nic.ip/%d" % nic_n)
  nic_network = _GetInstanceField(instance.name, "nic.network.name/%d" % nic_n)
  nic_mode = _GetInstanceField(instance.name, "nic.mode/%d" % nic_n)

  # Fixed message typo: "if part" -> "is part"
  AssertEqual(IP4Address.InNetwork(constants.INSTANCE_COMMUNICATION_NETWORK4,
                                   nic_ip),
              True,
              msg="Checking if NIC's IP is part of the expected network")

  AssertEqual(network_name, nic_network,
              msg="Checking if NIC's network name matches the expected value")

  AssertEqual(constants.INSTANCE_COMMUNICATION_NETWORK_MODE, nic_mode,
              msg="Checking if NIC's mode name matches the expected value")

  ## Check the output of 'ip route'
  cmd = ["ip", "route", "show", nic_ip]
  result_output = qa_utils.GetCommandOutput(master.primary,
                                            utils.ShellQuoteArgs(cmd))
  result = result_output.split()

  AssertEqual(len(result), 5, msg="Checking if the IP route is established")

  route_ip = result[0]
  route_dev = result[1]
  route_tap = result[2]
  route_scope = result[3]
  route_link = result[4]

  AssertEqual(route_ip, nic_ip,
              msg="Checking if IP route shows the expected IP")
  AssertEqual(route_dev, "dev",
              msg="Checking if IP route shows the expected device")
  AssertEqual(route_scope, "scope",
              msg="Checking if IP route shows the expected scope")
  AssertEqual(route_link, "link",
              msg="Checking if IP route shows the expected link-level scope")

  ## Check the output of 'ip address'
  cmd = ["ip", "address", "show", "dev", route_tap]
  result_output = qa_utils.GetCommandOutput(master.primary,
                                            utils.ShellQuoteArgs(cmd))
  result = result_output.splitlines()

  AssertEqual(len(result), 3,
              msg="Checking if the IP address is established")

  result = result.pop().split()

  AssertEqual(len(result), 7,
              msg="Checking if the IP address has the expected value")

  address_ip = result[1]
  address_netmask = result[3]

  AssertEqual(address_ip, "169.254.169.254/32",
              msg="Checking if the TAP interface has the expected IP")
  AssertEqual(address_netmask, "169.254.255.255",
              msg="Checking if the TAP interface has the expected netmask")

  # Disable instance communication mechanism for this instance
  AssertCommand(["gnt-instance", "modify", "-c", "no", instance.name])

  # Reboot instance for changes to NIC to take effect
  AssertCommand(["gnt-instance", "reboot", instance.name])

  # Disable instance communication network at cluster level
  cmd = ["gnt-cluster", "modify",
         "--instance-communication-network=%s" % network_name]
  result_output = qa_utils.GetCommandOutput(master.primary,
                                            utils.ShellQuoteArgs(cmd))
  print(result_output)
|
1340 |
|
1341 |
|
1342 |
# Each entry: (QA test name, disk template, test function, N)
# NOTE(review): the trailing integer is presumably the number of nodes the
# test needs (plain/file use 1, DRBD uses 2) — confirm against the callers.
available_instance_tests = [
  ("instance-add-plain-disk", constants.DT_PLAIN,
   TestInstanceAddWithPlainDisk, 1),
  ("instance-add-drbd-disk", constants.DT_DRBD8,
   TestInstanceAddWithDrbdDisk, 2),
  ("instance-add-diskless", constants.DT_DISKLESS,
   TestInstanceAddDiskless, 1),
  ("instance-add-file", constants.DT_FILE,
   TestInstanceAddFile, 1),
  ("instance-add-shared-file", constants.DT_SHARED_FILE,
   TestInstanceAddSharedFile, 1),
  ]