root / qa / qa_instance.py @ 6f665bf7
History | View | Annotate | Download (36.8 kB)
1 |
#
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2007, 2011, 2012, 2013 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
|
22 |
"""Instance related QA tests.
|
23 |
|
24 |
"""
|
25 |
|
26 |
import operator |
27 |
import os |
28 |
import re |
29 |
|
30 |
from ganeti import utils |
31 |
from ganeti import constants |
32 |
from ganeti import query |
33 |
from ganeti import pathutils |
34 |
|
35 |
import qa_config |
36 |
import qa_utils |
37 |
import qa_error |
38 |
|
39 |
from qa_utils import AssertIn, AssertCommand, AssertEqual |
40 |
from qa_utils import InstanceCheck, INST_DOWN, INST_UP, FIRST_ARG, RETURN_VALUE |
41 |
|
42 |
|
43 |
def _GetDiskStatePath(disk): |
44 |
return "/sys/block/%s/device/state" % disk |
45 |
|
46 |
|
47 |
def _GetGenericAddParameters(inst, disk_template, force_mac=None):
  """Build the common command-line parameters for "gnt-instance add".

  @type inst: qa_config instance object
  @param inst: instance for which the parameters are generated
  @type disk_template: string
  @param disk_template: disk template; for diskless instances no
      "--disk" options are emitted
  @type force_mac: string or None
  @param force_mac: if set, use this MAC address for NIC 0 instead of
      the one stored in the instance configuration
  @rtype: list of strings
  @return: command-line arguments (backend memory params, disks, NIC 0 MAC)

  """
  params = ["-B"]
  params.append("%s=%s,%s=%s" % (constants.BE_MINMEM,
                                 qa_config.get(constants.BE_MINMEM),
                                 constants.BE_MAXMEM,
                                 qa_config.get(constants.BE_MAXMEM)))

  if disk_template != constants.DT_DISKLESS:
    for idx, size in enumerate(qa_config.get("disk")):
      params.extend(["--disk", "%s:size=%s" % (idx, size)])

  # Set static MAC address if configured
  if force_mac:
    nic0_mac = force_mac
  else:
    nic0_mac = inst.GetNicMacAddr(0, None)

  if nic0_mac:
    params.extend(["--net", "0:mac=%s" % nic0_mac])

  return params
|
68 |
|
69 |
|
70 |
def _CreateInstanceByDiskTemplateRaw(nodes_spec, disk_template, fail=False):
  """Creates an instance with the given disk template on the given nodes(s).

  Note that this function does not check if enough nodes are given for
  the respective disk template.

  @type nodes_spec: string
  @param nodes_spec: string specification of one node (by node name) or several
      nodes according to the requirements of the disk template
  @type disk_template: string
  @param disk_template: the disk template to be used by the instance
  @type fail: bool
  @param fail: if True, creation is expected to fail and None is returned
  @return: the created instance, or None when C{fail} is True

  """
  instance = qa_config.AcquireInstance()
  try:
    cmd = (["gnt-instance", "add",
            "--os-type=%s" % qa_config.get("os"),
            "--disk-template=%s" % disk_template,
            "--node=%s" % nodes_spec] +
           _GetGenericAddParameters(instance, disk_template))
    cmd.append(instance.name)

    AssertCommand(cmd, fail=fail)

    if not fail:
      _CheckSsconfInstanceList(instance.name)
      instance.SetDiskTemplate(disk_template)

      # Successful creation: hand ownership of the instance to the caller
      return instance
  except:
    # Any unexpected error: give the instance object back to the pool
    # before re-raising, so later tests can re-use the slot
    instance.Release()
    raise

  # Only reached when AssertCommand was told the add should fail (the
  # success path returns inside the "if not fail" branch above)
  assert fail
  instance.Release()
  return None
107 |
|
108 |
|
109 |
def _CreateInstanceByDiskTemplateOneNode(nodes, disk_template, fail=False):
  """Create an instance on a single node using the given disk template.

  Intended for disk templates for which one node is sufficient, for
  example: plain, diskless, file, sharedfile, blockdev, rados.

  @type nodes: list of nodes
  @param nodes: list of nodes; only the first element is used
  @type disk_template: string
  @param disk_template: the disk template to be used by the instance
  @return: the created instance

  """
  assert nodes
  first_node = nodes[0]
  return _CreateInstanceByDiskTemplateRaw(first_node.primary, disk_template,
                                          fail=fail)
125 |
|
126 |
|
127 |
def _CreateInstanceDrbd8(nodes, fail=False):
  """Create an instance using the 'drbd' disk template on the given nodes.

  @type nodes: list of nodes
  @param nodes: nodes to be used by the instance (at least two)
  @return: the created instance

  """
  assert len(nodes) > 1
  # gnt-instance expects a colon-separated list of primary node names
  nodes_spec = ":".join([node.primary for node in nodes])
  return _CreateInstanceByDiskTemplateRaw(nodes_spec, constants.DT_DRBD8,
                                          fail=fail)
139 |
|
140 |
|
141 |
def CreateInstanceByDiskTemplate(nodes, disk_template, fail=False):
  """Create an instance using the given disk template.

  The required number of nodes (depending on the disk template) is taken
  from C{nodes}.  Intended for tests that only care that the instance
  uses the given disk template, not about other specifics.

  Note: If you use this function, make sure to call
  'TestInstanceRemove' at the end of your tests to avoid orphaned
  instances hanging around and interfering with the following tests.

  @type nodes: list of nodes
  @param nodes: the list of the nodes on which the instance will be placed;
      it needs to have sufficiently many elements for the given
      disk template
  @type disk_template: string
  @param disk_template: the disk template to be used by the instance
  @return: the created instance

  """
  if disk_template == constants.DT_DRBD8:
    return _CreateInstanceDrbd8(nodes, fail=fail)

  if disk_template in (constants.DT_DISKLESS, constants.DT_PLAIN,
                       constants.DT_FILE):
    return _CreateInstanceByDiskTemplateOneNode(nodes, disk_template, fail=fail)

  # FIXME: This assumes that for all other disk templates, we only need one
  # node and no disk template specific parameters. This branch is currently
  # only used in cases where we expect failure. Extend it when QA needs for
  # these templates change.
  return _CreateInstanceByDiskTemplateOneNode(nodes, disk_template, fail=fail)
|
172 |
|
173 |
|
174 |
def _GetInstanceInfo(instance):
  """Return information about the actual state of an instance.

  @type instance: string
  @param instance: the instance name
  @return: a dictionary with the following keys:
      - "nodes": instance nodes, a list of strings
      - "volumes": instance volume IDs, a list of strings
      - "drbd-minors": DRBD minors used by the instance, a dictionary where
        keys are nodes, and values are lists of integers (or an empty
        dictionary for non-DRBD instances)
      - "disk-template": instance disk template
      - "storage-type": storage type associated with the instance disk template

  """
  # A node name, optionally followed by group information in parentheses
  node_elem = r"([^,()]+)(?:\s+\([^)]+\))?"
  # re_nodelist matches a list of nodes returned by gnt-instance info, e.g.:
  #  node1.fqdn
  #  node2.fqdn,node3.fqdn
  #  node4.fqdn (group mygroup, group UUID 01234567-abcd-0123-4567-0123456789ab)
  # FIXME This works with no more than 2 secondaries
  re_nodelist = re.compile(node_elem + "(?:," + node_elem + ")?$")

  info = qa_utils.GetObjectInfo(["gnt-instance", "info", instance])[0]
  nodes = []
  for nodeinfo in info["Nodes"]:
    if "primary" in nodeinfo:
      nodes.append(nodeinfo["primary"])
    elif "secondaries" in nodeinfo:
      nodestr = nodeinfo["secondaries"]
      if nodestr:
        m = re_nodelist.match(nodestr)
        if m:
          # Drop unmatched optional groups before extending the node list
          nodes.extend(filter(None, m.groups()))
        else:
          # Fall back to the raw string if the pattern did not match
          nodes.append(nodestr)

  disk_template = info["Disk template"]
  if not disk_template:
    raise qa_error.Error("Can't get instance disk template")
  storage_type = constants.DISK_TEMPLATES_STORAGE_TYPE[disk_template]

  # Matches e.g. "node1.fqdn, minor=0"
  re_drbdnode = re.compile(r"^([^\s,]+),\s+minor=([0-9]+)$")
  vols = []
  drbd_min = {}
  for (count, diskinfo) in enumerate(info["Disks"]):
    (dtype, _) = diskinfo["disk/%s" % count].split(",", 1)
    if dtype == constants.LD_DRBD8:
      for child in diskinfo["child devices"]:
        vols.append(child["logical_id"])
      for key in ["nodeA", "nodeB"]:
        m = re_drbdnode.match(diskinfo[key])
        if not m:
          raise qa_error.Error("Cannot parse DRBD info: %s" % diskinfo[key])
        node = m.group(1)
        minor = int(m.group(2))
        minorlist = drbd_min.setdefault(node, [])
        minorlist.append(minor)
    elif dtype == constants.LD_LV:
      vols.append(diskinfo["logical_id"])

  assert nodes
  # Any instance with a secondary node must expose volumes
  assert len(nodes) < 2 or vols
  return {
    "nodes": nodes,
    "volumes": vols,
    "drbd-minors": drbd_min,
    "disk-template": disk_template,
    "storage-type": storage_type,
    }
244 |
|
245 |
|
246 |
def _DestroyInstanceDisks(instance):
  """Remove all the backend disks of an instance.

  This is used to simulate HW errors (dead nodes, broken disks...); the
  configuration of the instance is not affected.

  @type instance: dictionary
  @param instance: the instance

  """
  info = _GetInstanceInfo(instance.name)
  # FIXME: destruction/removal should be part of the disk class
  if info["storage-type"] == constants.ST_LVM_VG:
    vols = info["volumes"]
    for node in info["nodes"]:
      AssertCommand(["lvremove", "-f"] + vols, node=node)
  elif info["storage-type"] == constants.ST_FILE:
    # FIXME: file storage dir not configurable in qa
    # Note that this works for both file and sharedfile, and this is intended.
    filestorage = pathutils.DEFAULT_FILE_STORAGE_DIR
    idir = os.path.join(filestorage, instance.name)
    for node in info["nodes"]:
      AssertCommand(["rm", "-rf", idir], node=node)
  elif info["storage-type"] == constants.ST_DISKLESS:
    # Nothing to destroy for a diskless instance
    pass
|
270 |
|
271 |
|
272 |
def _GetInstanceField(instance, field):
  """Get the value of a field of an instance.

  @type instance: string
  @param instance: Instance name
  @type field: string
  @param field: Name of the field
  @rtype: string

  """
  master = qa_config.GetMasterNode()
  cmd = ["gnt-instance", "list", "--no-headers", "--units", "m",
         "-o", field, instance]
  output = qa_utils.GetCommandOutput(master.primary, utils.ShellQuoteArgs(cmd))
  return output.strip()
|
286 |
|
287 |
|
288 |
def _GetBoolInstanceField(instance, field):
  """Get the Boolean value of a field of an instance.

  @type instance: string
  @param instance: Instance name
  @type field: string
  @param field: Name of the field
  @rtype: bool
  @raise qa_error.Error: if the field value is neither "Y" nor "N"

  """
  info_out = _GetInstanceField(instance, field)
  if info_out == "Y":
    return True
  if info_out == "N":
    return False
  raise qa_error.Error("Field %s of instance %s has a non-Boolean value:"
                       " %s" % (field, instance, info_out))
|
306 |
|
307 |
|
308 |
def _GetNumInstanceField(instance, field):
  """Get a numeric value of a field of an instance.

  @type instance: string
  @param instance: Instance name
  @type field: string
  @param field: Name of the field
  @rtype: int or float
  @raise qa_error.Error: if the field value is not numeric

  """
  info_out = _GetInstanceField(instance, field)
  # Prefer an integer result, fall back to float
  for conversion in (int, float):
    try:
      return conversion(info_out)
    except ValueError:
      pass
  raise qa_error.Error("Field %s of instance %s has a non-numeric value:"
                       " %s" % (field, instance, info_out))
|
328 |
|
329 |
|
330 |
def GetInstanceSpec(instance, spec):
  """Return the current spec for the given parameter.

  @type instance: string
  @param instance: Instance name
  @type spec: string
  @param spec: one of the supported parameters: "mem-size", "cpu-count",
      "disk-count", "disk-size", "nic-count"
  @rtype: tuple
  @return: (minspec, maxspec); minspec and maxspec can be different only for
      memory and disk size

  """
  specmap = {
    "mem-size": ["be/minmem", "be/maxmem"],
    "cpu-count": ["vcpus"],
    "disk-count": ["disk.count"],
    # Placeholder only: the real per-disk field names are built dynamically
    # in the "disk-size" branch below
    "disk-size": ["disk.size/ "],
    "nic-count": ["nic.count"],
    }
  # For disks, first we need the number of disks
  if spec == "disk-size":
    (numdisk, _) = GetInstanceSpec(instance, "disk-count")
    fields = ["disk.size/%s" % k for k in range(0, numdisk)]
  else:
    assert spec in specmap, "%s not in %s" % (spec, specmap)
    fields = specmap[spec]
  values = [_GetNumInstanceField(instance, f) for f in fields]
  return (min(values), max(values))
359 |
|
360 |
|
361 |
def IsFailoverSupported(instance):
  """Whether the instance's disk template supports failover."""
  return instance.disk_template in constants.DTS_MIRRORED
363 |
|
364 |
|
365 |
def IsMigrationSupported(instance):
  """Whether the instance's disk template supports migration."""
  return instance.disk_template in constants.DTS_MIRRORED
367 |
|
368 |
|
369 |
def IsDiskReplacingSupported(instance):
  """Whether the instance's disk template supports disk replacement."""
  return instance.disk_template == constants.DT_DRBD8
|
371 |
|
372 |
|
373 |
def TestInstanceAddWithPlainDisk(nodes, fail=False):
  """gnt-instance add -t plain"""
  # Only run when the plain disk template is enabled for this QA run
  if constants.DT_PLAIN in qa_config.GetEnabledDiskTemplates():
    instance = _CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_PLAIN,
                                                    fail=fail)
    if not fail:
      qa_utils.RunInstanceCheck(instance, True)
    return instance
|
381 |
|
382 |
|
383 |
@InstanceCheck(None, INST_UP, RETURN_VALUE)
def TestInstanceAddWithDrbdDisk(nodes):
  """gnt-instance add -t drbd"""
  # Only run when the drbd disk template is enabled for this QA run
  if constants.DT_DRBD8 in qa_config.GetEnabledDiskTemplates():
    return _CreateInstanceDrbd8(nodes)
|
388 |
|
389 |
|
390 |
@InstanceCheck(None, INST_UP, RETURN_VALUE)
def TestInstanceAddFile(nodes):
  """gnt-instance add -t file"""
  assert len(nodes) == 1
  # Only run when the file disk template is enabled for this QA run
  if constants.DT_FILE in qa_config.GetEnabledDiskTemplates():
    return _CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_FILE)
|
396 |
|
397 |
|
398 |
@InstanceCheck(None, INST_UP, RETURN_VALUE)
def TestInstanceAddDiskless(nodes):
  """gnt-instance add -t diskless"""
  assert len(nodes) == 1
  # Bug fix: this previously checked DT_FILE, so whether the diskless test
  # ran depended on the *file* template being enabled, not the diskless one
  # (compare TestInstanceAddFile, which correctly checks DT_FILE)
  if constants.DT_DISKLESS in qa_config.GetEnabledDiskTemplates():
    return _CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_DISKLESS)
|
404 |
|
405 |
|
406 |
@InstanceCheck(None, INST_DOWN, FIRST_ARG)
def TestInstanceRemove(instance):
  """gnt-instance remove"""
  cmd = ["gnt-instance", "remove", "-f", instance.name]
  AssertCommand(cmd)
410 |
|
411 |
|
412 |
@InstanceCheck(INST_DOWN, INST_UP, FIRST_ARG)
def TestInstanceStartup(instance):
  """gnt-instance startup"""
  cmd = ["gnt-instance", "startup", instance.name]
  AssertCommand(cmd)
416 |
|
417 |
|
418 |
@InstanceCheck(INST_UP, INST_DOWN, FIRST_ARG)
def TestInstanceShutdown(instance):
  """gnt-instance shutdown"""
  cmd = ["gnt-instance", "shutdown", instance.name]
  AssertCommand(cmd)
422 |
|
423 |
|
424 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceReboot(instance):
  """gnt-instance reboot"""
  options = qa_config.get("options", {})
  # Allow the QA configuration to restrict the reboot types being tested
  reboot_types = options.get("reboot-types", constants.REBOOT_TYPES)
  name = instance.name
  for rtype in reboot_types:
    AssertCommand(["gnt-instance", "reboot", "--type=%s" % rtype, name])

  # Rebooting a stopped instance must start it again
  AssertCommand(["gnt-instance", "shutdown", name])
  qa_utils.RunInstanceCheck(instance, False)
  AssertCommand(["gnt-instance", "reboot", name])

  # Verify the instance is reported as running afterwards
  master = qa_config.GetMasterNode()
  cmd = ["gnt-instance", "list", "--no-headers", "-o", "status", name]
  result_output = qa_utils.GetCommandOutput(master.primary,
                                            utils.ShellQuoteArgs(cmd))
  AssertEqual(result_output.strip(), constants.INSTST_RUNNING)
442 |
|
443 |
|
444 |
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceReinstall(instance):
  """gnt-instance reinstall"""
  if instance.disk_template == constants.DT_DISKLESS:
    print qa_utils.FormatInfo("Test not supported for diskless instances")
    return

  AssertCommand(["gnt-instance", "reinstall", "-f", instance.name])

  # Test with non-existent OS definition; the command must fail
  AssertCommand(["gnt-instance", "reinstall", "-f",
                 "--os-type=NonExistantOsForQa",
                 instance.name],
                fail=True)
|
458 |
|
459 |
|
460 |
def _ReadSsconfInstanceList():
  """Reads ssconf_instance_list from the master node.

  @rtype: list of strings
  @return: instance names listed in the ssconf file

  """
  master = qa_config.GetMasterNode()

  filename = "ssconf_%s" % constants.SS_INSTANCE_LIST
  ssconf_path = utils.PathJoin(pathutils.DATA_DIR, filename)

  cat_cmd = ["cat", qa_utils.MakeNodePath(master, ssconf_path)]
  output = qa_utils.GetCommandOutput(master.primary,
                                     utils.ShellQuoteArgs(cat_cmd))
  return output.splitlines()
473 |
|
474 |
|
475 |
def _CheckSsconfInstanceList(instance):
  """Checks if a certain instance is in the ssconf instance list.

  @type instance: string
  @param instance: Instance name

  """
  resolved_name = qa_utils.ResolveInstanceName(instance)
  AssertIn(resolved_name, _ReadSsconfInstanceList())
484 |
|
485 |
|
486 |
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceRenameAndBack(rename_source, rename_target):
  """gnt-instance rename

  This must leave the instance with the original name, not the target
  name.

  @type rename_source: string
  @param rename_source: original instance name
  @type rename_target: string
  @param rename_target: temporary name to rename to and back from

  """
  _CheckSsconfInstanceList(rename_source)

  # first do a rename to a different actual name, expecting it to fail
  qa_utils.AddToEtcHosts(["meeeeh-not-exists", rename_target])
  try:
    AssertCommand(["gnt-instance", "rename", rename_source, rename_target],
                  fail=True)
    _CheckSsconfInstanceList(rename_source)
  finally:
    qa_utils.RemoveFromEtcHosts(["meeeeh-not-exists", rename_target])

  info = _GetInstanceInfo(rename_source)

  # Check instance volume tags correctly updated. Note that this check is lvm
  # specific, so we skip it for non-lvm-based instances.
  # FIXME: This will need updating when instances will be able to have
  # different disks living on storage pools with heterogeneous storage types.
  # FIXME: This check should be put inside the disk/storage class themselves,
  # rather than explicitly called here.
  if info["storage-type"] == constants.ST_LVM_VG:
    # In the lvm world we can check for tags on the logical volume
    tags_cmd = ("lvs -o tags --noheadings %s | grep " %
                (" ".join(info["volumes"]), ))
  else:
    # Other storage types don't have tags, so we use an always failing command,
    # to make sure it never gets executed
    tags_cmd = "false"

  # and now rename instance to rename_target...
  AssertCommand(["gnt-instance", "rename", rename_source, rename_target])
  _CheckSsconfInstanceList(rename_target)
  qa_utils.RunInstanceCheck(rename_source, False)
  qa_utils.RunInstanceCheck(rename_target, False)

  # NOTE: tags might not be the exactly as the instance name, due to
  # charset restrictions; hence the test might be flaky
  if (rename_source != rename_target and
      info["storage-type"] == constants.ST_LVM_VG):
    for node in info["nodes"]:
      AssertCommand(tags_cmd + rename_source, node=node, fail=True)
      AssertCommand(tags_cmd + rename_target, node=node, fail=False)

  # and back
  AssertCommand(["gnt-instance", "rename", rename_target, rename_source])
  _CheckSsconfInstanceList(rename_source)
  qa_utils.RunInstanceCheck(rename_target, False)

  # Verify the volume tags have been switched back as well
  if (rename_source != rename_target and
      info["storage-type"] == constants.ST_LVM_VG):
    for node in info["nodes"]:
      AssertCommand(tags_cmd + rename_source, node=node, fail=False)
      AssertCommand(tags_cmd + rename_target, node=node, fail=True)
546 |
|
547 |
|
548 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceFailover(instance):
  """gnt-instance failover"""
  if not IsFailoverSupported(instance):
    print qa_utils.FormatInfo("Instance doesn't support failover, skipping"
                              " test")
    return

  cmd = ["gnt-instance", "failover", "--force", instance.name]

  # failover ...
  AssertCommand(cmd)
  qa_utils.RunInstanceCheck(instance, True)

  # ... and back
  AssertCommand(cmd)
|
565 |
|
566 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceMigrate(instance, toggle_always_failover=True):
  """gnt-instance migrate

  @param toggle_always_failover: if True, also migrate with the
      always_failover backend parameter temporarily inverted

  """
  if not IsMigrationSupported(instance):
    print qa_utils.FormatInfo("Instance doesn't support migration, skipping"
                              " test")
    return

  cmd = ["gnt-instance", "migrate", "--force", instance.name]
  af_par = constants.BE_ALWAYS_FAILOVER
  af_field = "be/" + constants.BE_ALWAYS_FAILOVER
  # Remember the initial always_failover setting so it can be restored
  af_init_val = _GetBoolInstanceField(instance.name, af_field)

  # migrate ...
  AssertCommand(cmd)
  # TODO: Verify the choice between failover and migration
  qa_utils.RunInstanceCheck(instance, True)

  # ... and back (possibly with always_failover toggled)
  if toggle_always_failover:
    AssertCommand(["gnt-instance", "modify", "-B",
                   ("%s=%s" % (af_par, not af_init_val)),
                   instance.name])
  AssertCommand(cmd)
  # TODO: Verify the choice between failover and migration
  qa_utils.RunInstanceCheck(instance, True)
  if toggle_always_failover:
    # Restore the original always_failover value
    AssertCommand(["gnt-instance", "modify", "-B",
                   ("%s=%s" % (af_par, af_init_val)), instance.name])

  # TODO: Split into multiple tests
  # Migrating a stopped instance without --allow-failover must fail
  AssertCommand(["gnt-instance", "shutdown", instance.name])
  qa_utils.RunInstanceCheck(instance, False)
  AssertCommand(cmd, fail=True)
  AssertCommand(["gnt-instance", "migrate", "--force", "--allow-failover",
                 instance.name])
  AssertCommand(["gnt-instance", "start", instance.name])
  AssertCommand(cmd)
  # @InstanceCheck enforces the check that the instance is running
  qa_utils.RunInstanceCheck(instance, True)

  AssertCommand(["gnt-instance", "modify", "-B",
                 ("%s=%s" %
                  (constants.BE_ALWAYS_FAILOVER, constants.VALUE_TRUE)),
                 instance.name])

  AssertCommand(cmd)
  qa_utils.RunInstanceCheck(instance, True)
  # TODO: Verify that a failover has been done instead of a migration

  # TODO: Verify whether the default value is restored here (not hardcoded)
  AssertCommand(["gnt-instance", "modify", "-B",
                 ("%s=%s" %
                  (constants.BE_ALWAYS_FAILOVER, constants.VALUE_FALSE)),
                 instance.name])

  AssertCommand(cmd)
  qa_utils.RunInstanceCheck(instance, True)
|
624 |
|
625 |
|
626 |
def TestInstanceInfo(instance): |
627 |
"""gnt-instance info"""
|
628 |
AssertCommand(["gnt-instance", "info", instance.name]) |
629 |
|
630 |
|
631 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceModify(instance):
  """gnt-instance modify"""
  default_hv = qa_config.GetDefaultHypervisor()

  # Assume /sbin/init exists on all systems
  test_kernel = "/sbin/init"
  test_initrd = test_kernel

  # Save original memory settings so they can be restored below
  orig_maxmem = qa_config.get(constants.BE_MAXMEM)
  orig_minmem = qa_config.get(constants.BE_MINMEM)
  #orig_bridge = qa_config.get("bridge", "xen-br0")

  # Each entry is one set of "gnt-instance modify" arguments to try
  args = [
    ["-B", "%s=128" % constants.BE_MINMEM],
    ["-B", "%s=128" % constants.BE_MAXMEM],
    ["-B", "%s=%s,%s=%s" % (constants.BE_MINMEM, orig_minmem,
                            constants.BE_MAXMEM, orig_maxmem)],
    ["-B", "%s=2" % constants.BE_VCPUS],
    ["-B", "%s=1" % constants.BE_VCPUS],
    ["-B", "%s=%s" % (constants.BE_VCPUS, constants.VALUE_DEFAULT)],
    ["-B", "%s=%s" % (constants.BE_ALWAYS_FAILOVER, constants.VALUE_TRUE)],
    ["-B", "%s=%s" % (constants.BE_ALWAYS_FAILOVER, constants.VALUE_DEFAULT)],

    ["-H", "%s=%s" % (constants.HV_KERNEL_PATH, test_kernel)],
    ["-H", "%s=%s" % (constants.HV_KERNEL_PATH, constants.VALUE_DEFAULT)],

    # TODO: bridge tests
    #["--bridge", "xen-br1"],
    #["--bridge", orig_bridge],
    ]

  # Hypervisor-specific parameter sets
  if default_hv == constants.HT_XEN_PVM:
    args.extend([
      ["-H", "%s=%s" % (constants.HV_INITRD_PATH, test_initrd)],
      ["-H", "no_%s" % (constants.HV_INITRD_PATH, )],
      ["-H", "%s=%s" % (constants.HV_INITRD_PATH, constants.VALUE_DEFAULT)],
      ])
  elif default_hv == constants.HT_XEN_HVM:
    args.extend([
      ["-H", "%s=acn" % constants.HV_BOOT_ORDER],
      ["-H", "%s=%s" % (constants.HV_BOOT_ORDER, constants.VALUE_DEFAULT)],
      ])

  for alist in args:
    AssertCommand(["gnt-instance", "modify"] + alist + [instance.name])

  # check no-modify
  AssertCommand(["gnt-instance", "modify", instance.name], fail=True)

  # Marking offline while instance is running must fail...
  AssertCommand(["gnt-instance", "modify", "--offline", instance.name],
                fail=True)

  # ...while making it online is ok, and should work
  AssertCommand(["gnt-instance", "modify", "--online", instance.name])
687 |
|
688 |
|
689 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceModifyPrimaryAndBack(instance, currentnode, othernode):
  """gnt-instance modify --new-primary

  This will leave the instance on its original primary node, not other node.

  """
  if instance.disk_template != constants.DT_FILE:
    print qa_utils.FormatInfo("Test only supported for the file disk template")
    return

  cluster_name = qa_config.get("name")

  name = instance.name
  current = currentnode.primary
  other = othernode.primary

  # FIXME: the qa doesn't have a customizable file storage dir parameter. As
  # such for now we use the default.
  filestorage = pathutils.DEFAULT_FILE_STORAGE_DIR
  disk = os.path.join(filestorage, name)

  # Changing the primary of a running instance must fail
  AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % other, name],
                fail=True)
  AssertCommand(["gnt-instance", "shutdown", name])
  # Copy the disk files to the new primary before switching over
  AssertCommand(["scp", "-oGlobalKnownHostsFile=%s" %
                 pathutils.SSH_KNOWN_HOSTS_FILE,
                 "-oCheckHostIp=no", "-oStrictHostKeyChecking=yes",
                 "-oHashKnownHosts=no", "-oHostKeyAlias=%s" % cluster_name,
                 "-r", disk, "%s:%s" % (other, filestorage)], node=current)
  AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % other, name])
  AssertCommand(["gnt-instance", "startup", name])

  # and back
  AssertCommand(["gnt-instance", "shutdown", name])
  # Remove the copied disk files; the originals are still on "current"
  AssertCommand(["rm", "-rf", disk], node=other)
  AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % current, name])
  AssertCommand(["gnt-instance", "startup", name])
727 |
|
728 |
|
729 |
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceStoppedModify(instance):
  """gnt-instance modify (stopped instance)"""
  name = instance.name

  # Instance was not marked offline; try marking it online once more
  AssertCommand(["gnt-instance", "modify", "--online", name])

  # Mark instance as offline
  AssertCommand(["gnt-instance", "modify", "--offline", name])

  # When the instance is offline shutdown should only work with --force,
  # while start should never work
  AssertCommand(["gnt-instance", "shutdown", name], fail=True)
  AssertCommand(["gnt-instance", "shutdown", "--force", name])
  AssertCommand(["gnt-instance", "start", name], fail=True)
  AssertCommand(["gnt-instance", "start", "--force", name], fail=True)

  # Also do offline to offline
  AssertCommand(["gnt-instance", "modify", "--offline", name])

  # And online again
  AssertCommand(["gnt-instance", "modify", "--online", name])
752 |
|
753 |
|
754 |
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceConvertDiskToPlain(instance, inodes):
  """gnt-instance modify -t

  Converts a DRBD instance to plain and back again.

  """
  name = instance.name

  template = instance.disk_template
  if template != constants.DT_DRBD8:
    print qa_utils.FormatInfo("Unsupported template %s, skipping conversion"
                              " test" % template)
    return

  assert len(inodes) == 2
  AssertCommand(["gnt-instance", "modify", "-t", constants.DT_PLAIN, name])
  # Converting back to DRBD needs a secondary node (-n)
  AssertCommand(["gnt-instance", "modify", "-t", constants.DT_DRBD8,
                 "-n", inodes[1].primary, name])
769 |
|
770 |
|
771 |
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceGrowDisk(instance):
  """gnt-instance grow-disk"""
  if qa_config.GetExclusiveStorage():
    print qa_utils.FormatInfo("Test not supported with exclusive_storage")
    return

  if instance.disk_template == constants.DT_DISKLESS:
    print qa_utils.FormatInfo("Test not supported for diskless instances")
    return

  name = instance.name
  all_size = qa_config.get("disk")
  all_grow = qa_config.get("disk-growth")

  if not all_grow:
    # missing disk sizes but instance grow disk has been enabled,
    # let's set fixed/nomimal growth
    all_grow = ["128M" for _ in all_size]

  for idx, (size, grow) in enumerate(zip(all_size, all_grow)):
    # succeed in grow by amount
    AssertCommand(["gnt-instance", "grow-disk", name, str(idx), grow])
    # fail in grow to the old size
    AssertCommand(["gnt-instance", "grow-disk", "--absolute", name, str(idx),
                   size], fail=True)
    # succeed to grow to old size + 2 * growth
    int_size = utils.ParseUnit(size)
    int_grow = utils.ParseUnit(grow)
    AssertCommand(["gnt-instance", "grow-disk", "--absolute", name, str(idx),
                   str(int_size + 2 * int_grow)])
802 |
|
803 |
|
804 |
def TestInstanceList():
  """gnt-instance list"""
  fields = query.INSTANCE_FIELDS.keys()
  qa_utils.GenericQueryTest("gnt-instance", fields)
|
807 |
|
808 |
|
809 |
def TestInstanceListFields():
  """gnt-instance list-fields"""
  fields = query.INSTANCE_FIELDS.keys()
  qa_utils.GenericQueryFieldsTest("gnt-instance", fields)
|
812 |
|
813 |
|
814 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceConsole(instance):
  """gnt-instance console"""
  cmd = ["gnt-instance", "console", "--show-cmd", instance.name]
  AssertCommand(cmd)
818 |
|
819 |
|
820 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
|
821 |
def TestReplaceDisks(instance, curr_nodes, other_nodes): |
822 |
"""gnt-instance replace-disks"""
|
823 |
def buildcmd(args): |
824 |
cmd = ["gnt-instance", "replace-disks"] |
825 |
cmd.extend(args) |
826 |
cmd.append(instance.name) |
827 |
return cmd
|
828 |
|
829 |
if not IsDiskReplacingSupported(instance): |
830 |
print qa_utils.FormatInfo("Instance doesn't support disk replacing," |
831 |
" skipping test")
|
832 |
return
|
833 |
|
834 |
# Currently all supported templates have one primary and one secondary node
|
835 |
assert len(curr_nodes) == 2 |
836 |
snode = curr_nodes[1]
|
837 |
assert len(other_nodes) == 1 |
838 |
othernode = other_nodes[0]
|
839 |
|
840 |
options = qa_config.get("options", {})
|
841 |
use_ialloc = options.get("use-iallocators", True) |
842 |
for data in [ |
843 |
["-p"],
|
844 |
["-s"],
|
845 |
# A placeholder; the actual command choice depends on use_ialloc
|
846 |
None,
|
847 |
# Restore the original secondary
|
848 |
["--new-secondary=%s" % snode.primary],
|
849 |
]: |
850 |
if data is None: |
851 |
if use_ialloc:
|
852 |
data = ["-I", constants.DEFAULT_IALLOCATOR_SHORTCUT]
|
853 |
else:
|
854 |
data = ["--new-secondary=%s" % othernode.primary]
|
855 |
AssertCommand(buildcmd(data)) |
856 |
|
857 |
AssertCommand(buildcmd(["-a"]))
|
858 |
AssertCommand(["gnt-instance", "stop", instance.name]) |
859 |
AssertCommand(buildcmd(["-a"]), fail=True) |
860 |
AssertCommand(["gnt-instance", "activate-disks", instance.name]) |
861 |
AssertCommand(["gnt-instance", "activate-disks", "--wait-for-sync", |
862 |
instance.name]) |
863 |
AssertCommand(buildcmd(["-a"]))
|
864 |
AssertCommand(["gnt-instance", "start", instance.name]) |
865 |
|
866 |
|
867 |
def _AssertRecreateDisks(cmdargs, instance, fail=False, check=True,
                         destroy=True):
  """Execute gnt-instance recreate-disks and check the result

  @param cmdargs: Arguments (instance name excluded)
  @param instance: Instance to operate on
  @param fail: True if the command is expected to fail
  @param check: If True and fail is False, check that the disks work
  @param destroy: If True, destroy the old disks first

  """
  if destroy:
    _DestroyInstanceDisks(instance)
  cmd = ["gnt-instance", "recreate-disks"] + cmdargs + [instance.name]
  AssertCommand(cmd, fail=fail)
  if check and not fail:
    # Quick check that the disks are there
    AssertCommand(["gnt-instance", "activate-disks", instance.name])
    AssertCommand(["gnt-instance", "activate-disks", "--wait-for-sync",
                   instance.name])
    AssertCommand(["gnt-instance", "deactivate-disks", instance.name])
888 |
|
889 |
|
890 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestRecreateDisks(instance, inodes, othernodes):
  """gnt-instance recreate-disks

  @param instance: Instance to work on
  @param inodes: List of the current nodes of the instance
  @param othernodes: list/tuple of nodes where to temporarily recreate disks

  """
  options = qa_config.get("options", {})
  use_ialloc = options.get("use-iallocators", True)
  # Node names joined with ":", the separator "recreate-disks -n" expects
  other_seq = ":".join([n.primary for n in othernodes])
  orig_seq = ":".join([n.primary for n in inodes])
  # These fail because the instance is running
  _AssertRecreateDisks(["-n", other_seq], instance, fail=True, destroy=False)
  if use_ialloc:
    _AssertRecreateDisks(["-I", "hail"], instance, fail=True, destroy=False)
  else:
    _AssertRecreateDisks(["-n", other_seq], instance, fail=True, destroy=False)
  AssertCommand(["gnt-instance", "stop", instance.name])
  # Disks exist: this should fail
  _AssertRecreateDisks([], instance, fail=True, destroy=False)
  # Recreate disks in place
  _AssertRecreateDisks([], instance)
  # Move disks away
  if use_ialloc:
    _AssertRecreateDisks(["-I", "hail"], instance)
    # Move disks somewhere else
    _AssertRecreateDisks(["-I", constants.DEFAULT_IALLOCATOR_SHORTCUT],
                         instance)
  else:
    _AssertRecreateDisks(["-n", other_seq], instance)
  # Move disks back
  _AssertRecreateDisks(["-n", orig_seq], instance, check=False)
  # This and InstanceCheck decoration check that the disks are working
  AssertCommand(["gnt-instance", "reinstall", "-f", instance.name])
  AssertCommand(["gnt-instance", "start", instance.name])
927 |
|
928 |
|
929 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
|
930 |
def TestInstanceExport(instance, node): |
931 |
"""gnt-backup export -n ..."""
|
932 |
name = instance.name |
933 |
AssertCommand(["gnt-backup", "export", "-n", node.primary, name]) |
934 |
return qa_utils.ResolveInstanceName(name)
|
935 |
|
936 |
|
937 |
@InstanceCheck(None, INST_DOWN, FIRST_ARG)
def TestInstanceExportWithRemove(instance, node):
  """gnt-backup export --remove-instance"""
  cmd = ["gnt-backup", "export", "-n", node.primary,
         "--remove-instance", instance.name]
  AssertCommand(cmd)
|
942 |
|
943 |
|
944 |
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceExportNoTarget(instance):
  """gnt-backup export (without target node, should fail)"""
  cmd = ["gnt-backup", "export", instance.name]
  AssertCommand(cmd, fail=True)
948 |
|
949 |
|
950 |
@InstanceCheck(None, INST_DOWN, FIRST_ARG)
def TestInstanceImport(newinst, node, expnode, name):
  """gnt-backup import"""
  templ = constants.DT_PLAIN
  cmd = ["gnt-backup", "import",
         "--disk-template=%s" % templ,
         "--no-ip-check",
         "--src-node=%s" % expnode.primary,
         "--src-dir=%s/%s" % (pathutils.EXPORT_DIR, name),
         "--node=%s" % node.primary]
  # Generate a fresh MAC so the import does not clash with the original
  cmd.extend(_GetGenericAddParameters(newinst, templ,
                                      force_mac=constants.VALUE_GENERATE))
  cmd.append(newinst.name)
  AssertCommand(cmd)
  # Record the disk template the imported instance ended up with
  newinst.SetDiskTemplate(templ)
965 |
|
966 |
|
967 |
def TestBackupList(expnode):
  """gnt-backup list"""
  AssertCommand(["gnt-backup", "list", "--node=%s" % expnode.primary])

  fields = query.EXPORT_FIELDS.keys()
  qa_utils.GenericQueryTest("gnt-backup", fields,
                            namefield=None, test_unknown=False)
973 |
|
974 |
|
975 |
def TestBackupListFields():
  """gnt-backup list-fields"""
  fields = query.EXPORT_FIELDS.keys()
  qa_utils.GenericQueryFieldsTest("gnt-backup", fields)
|
978 |
|
979 |
|
980 |
def TestRemoveInstanceOfflineNode(instance, snode, set_offline, set_online):
  """gnt-instance remove with an off-line node

  @param instance: instance
  @param snode: secondary node, to be set offline
  @param set_offline: function to call to set the node off-line
  @param set_online: function to call to set the node on-line

  """
  # Collect the instance's storage details while all nodes are still
  # reachable; they are needed for the manual cleanup below
  info = _GetInstanceInfo(instance.name)
  set_offline(snode)
  try:
    TestInstanceRemove(instance)
  finally:
    # Put the node back on-line even if the removal failed
    set_online(snode)

  # Clean up the disks on the offline node, if necessary
  if instance.disk_template not in constants.DTS_EXT_MIRROR:
    # FIXME: abstract the cleanup inside the disks
    if info["storage-type"] == constants.ST_LVM_VG:
      # Tear down the DRBD devices first, then remove the backing LVs
      for minor in info["drbd-minors"][snode.primary]:
        AssertCommand(["drbdsetup", str(minor), "down"], node=snode)
      AssertCommand(["lvremove", "-f"] + info["volumes"], node=snode)
    elif info["storage-type"] == constants.ST_FILE:
      filestorage = pathutils.DEFAULT_FILE_STORAGE_DIR
      disk = os.path.join(filestorage, instance.name)
      AssertCommand(["rm", "-rf", disk], node=snode)
1007 |
|
1008 |
|
1009 |
def TestInstanceCreationRestrictedByDiskTemplates():
  """Test if adding instances is only possible if they use an enabled
  disk template."""
  enabled_disk_templates = qa_config.GetEnabledDiskTemplates()
  nodes = qa_config.AcquireManyNodes(2)

  # Setup the cluster with the enabled_disk_templates
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-template=%s" %
       ",".join(enabled_disk_templates)],
    fail=False)

  # Test instance creation for enabled disk templates
  for disk_template in enabled_disk_templates:
    instance = CreateInstanceByDiskTemplate(nodes, disk_template, False)
    TestInstanceRemove(instance)

  # Test that instance creation fails for disabled disk templates
  disabled_disk_templates = list(constants.DISK_TEMPLATES
                                 - set(enabled_disk_templates))
  for disk_template in disabled_disk_templates:
    CreateInstanceByDiskTemplate(nodes, disk_template, True)

  # Test instance creation for after disabling enabled disk templates
  if len(enabled_disk_templates) > 1:
    # Partition the disk templates, enable them separately and check if the
    # disabled ones cannot be used by instances.
    # Use floor division: under "/" this would become a float with Python 3
    # semantics (or "from __future__ import division") and break the slices.
    middle = len(enabled_disk_templates) // 2
    templates1 = enabled_disk_templates[:middle]
    templates2 = enabled_disk_templates[middle:]

    for (enabled, disabled) in [(templates1, templates2),
                                (templates2, templates1)]:
      AssertCommand(["gnt-cluster", "modify",
                     "--enabled-disk-template=%s" %
                       ",".join(enabled)],
                    fail=False)
      for disk_template in disabled:
        CreateInstanceByDiskTemplate(nodes, disk_template, True)
  elif len(enabled_disk_templates) == 1:
    # If only one disk template is enabled in the QA config, we have to enable
    # some of the disabled disk templates in order to test if the disabling the
    # only enabled disk template prohibits creating instances of that template.
    AssertCommand(["gnt-cluster", "modify",
                   "--enabled-disk-template=%s" %
                     ",".join(disabled_disk_templates)],
                  fail=False)
    CreateInstanceByDiskTemplate(nodes, enabled_disk_templates[0], True)
  else:
    raise qa_error.Error("Please enable at least one disk template"
                         " in your QA setup.")

  # Restore initially enabled disk templates
  AssertCommand(["gnt-cluster", "modify",
                 "--enabled-disk-template=%s" %
                   ",".join(enabled_disk_templates)],
                fail=False)
|