qa/qa_instance.py @ e75f80b9
#
#

# Copyright (C) 2007, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Instance related QA tests.

"""
import operator
import os
import re

from ganeti import utils
from ganeti import constants
from ganeti import query
from ganeti import pathutils

import qa_config
import qa_utils
import qa_error

from qa_utils import AssertIn, AssertCommand, AssertEqual
from qa_utils import InstanceCheck, INST_DOWN, INST_UP, FIRST_ARG, RETURN_VALUE


def _GetDiskStatePath(disk):
  return "/sys/block/%s/device/state" % disk


def _GetGenericAddParameters(inst, disk_template, force_mac=None):
  params = ["-B"]
  params.append("%s=%s,%s=%s" % (constants.BE_MINMEM,
                                 qa_config.get(constants.BE_MINMEM),
                                 constants.BE_MAXMEM,
                                 qa_config.get(constants.BE_MAXMEM)))

  if disk_template != constants.DT_DISKLESS:
    for idx, disk in enumerate(qa_config.GetDiskOptions()):
      size = disk.get("size")
      name = disk.get("name")
      diskparams = "%s:size=%s" % (idx, size)
      if name:
        diskparams += ",name=%s" % name
      params.extend(["--disk", diskparams])

  # Set static MAC address if configured
  if force_mac:
    nic0_mac = force_mac
  else:
    nic0_mac = inst.GetNicMacAddr(0, None)

  if nic0_mac:
    params.extend(["--net", "0:mac=%s" % nic0_mac])

  return params
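
# A sketch of the result, for illustration only (the sizes and the MAC below
# are hypothetical; the real values come from the QA configuration): with two
# disks and a static MAC configured, the list built above would look like
#   ["-B", "minmem=128,maxmem=256",
#    "--disk", "0:size=1G", "--disk", "1:size=2G",
#    "--net", "0:mac=aa:00:00:11:22:33"]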


def _CreateInstanceByDiskTemplateRaw(nodes_spec, disk_template, fail=False):
  """Creates an instance with the given disk template on the given node(s).

  Note that this function does not check if enough nodes are given for
  the respective disk template.

  @type nodes_spec: string
  @param nodes_spec: string specification of one node (by node name) or several
                     nodes according to the requirements of the disk template
  @type disk_template: string
  @param disk_template: the disk template to be used by the instance
  @return: the created instance

  """
  instance = qa_config.AcquireInstance()
  try:
    cmd = (["gnt-instance", "add",
            "--os-type=%s" % qa_config.get("os"),
            "--disk-template=%s" % disk_template,
            "--node=%s" % nodes_spec] +
           _GetGenericAddParameters(instance, disk_template))
    cmd.append(instance.name)

    AssertCommand(cmd, fail=fail)

    if not fail:
      _CheckSsconfInstanceList(instance.name)
      instance.SetDiskTemplate(disk_template)

      return instance
  except:
    instance.Release()
    raise

  # Handle the case where creation is expected to fail
  assert fail
  instance.Release()
  return None
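
# For illustration (hypothetical node names): for a single-node template the
# nodes_spec is just "node1.example.com", while for DRBD the caller passes a
# colon-separated pair, so the command built above amounts to e.g.
#   gnt-instance add --os-type=<os> --disk-template=drbd \
#     --node=node1.example.com:node2.example.com ... <instance>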


def _CreateInstanceByDiskTemplateOneNode(nodes, disk_template, fail=False):
  """Creates an instance using the given disk template for disk templates
  for which one given node is sufficient. These templates are for example:
  plain, diskless, file, sharedfile, blockdev, rados.

  @type nodes: list of nodes
  @param nodes: a list of nodes, whose first element is used to create the
                instance
  @type disk_template: string
  @param disk_template: the disk template to be used by the instance
  @return: the created instance

  """
  assert len(nodes) > 0
  return _CreateInstanceByDiskTemplateRaw(nodes[0].primary, disk_template,
                                          fail=fail)


def _CreateInstanceDrbd8(nodes, fail=False):
  """Creates an instance using disk template 'drbd' on the given nodes.

  @type nodes: list of nodes
  @param nodes: nodes to be used by the instance
  @return: the created instance

  """
  assert len(nodes) > 1
  return _CreateInstanceByDiskTemplateRaw(
    ":".join(map(operator.attrgetter("primary"), nodes)),
    constants.DT_DRBD8, fail=fail)


def CreateInstanceByDiskTemplate(nodes, disk_template, fail=False):
  """Given a disk template, this function creates an instance using
  the template. It uses the required number of nodes depending on
  the disk template. This function is intended to be used by tests
  that don't care about the specifics of the instance other than
  that it uses the given disk template.

  Note: If you use this function, make sure to call
  'TestInstanceRemove' at the end of your tests to avoid orphaned
  instances hanging around and interfering with the following tests.

  @type nodes: list of nodes
  @param nodes: the list of the nodes on which the instance will be placed;
                it needs to have sufficiently many elements for the given
                disk template
  @type disk_template: string
  @param disk_template: the disk template to be used by the instance
  @return: the created instance

  """
  if disk_template == constants.DT_DRBD8:
    return _CreateInstanceDrbd8(nodes, fail=fail)
  elif disk_template in [constants.DT_DISKLESS, constants.DT_PLAIN,
                         constants.DT_FILE]:
    return _CreateInstanceByDiskTemplateOneNode(nodes, disk_template, fail=fail)
  else:
    # FIXME: This assumes that for all other disk templates, we only need one
    # node and no disk template specific parameters. This else-branch is
    # currently only used in cases where we expect failure. Extend it when
    # QA needs for these templates change.
    return _CreateInstanceByDiskTemplateOneNode(nodes, disk_template, fail=fail)


def _GetInstanceInfo(instance):
  """Return information about the actual state of an instance.

  @type instance: string
  @param instance: the instance name
  @return: a dictionary with the following keys:
      - "nodes": instance nodes, a list of strings
      - "volumes": instance volume IDs, a list of strings
      - "drbd-minors": DRBD minors used by the instance, a dictionary where
        keys are nodes, and values are lists of integers (or an empty
        dictionary for non-DRBD instances)
      - "disk-template": instance disk template
      - "storage-type": storage type associated with the instance disk template

  """
  node_elem = r"([^,()]+)(?:\s+\([^)]+\))?"
  # re_nodelist matches a list of nodes returned by gnt-instance info, e.g.:
  #  node1.fqdn
  #  node2.fqdn,node3.fqdn
  #  node4.fqdn (group mygroup, group UUID 01234567-abcd-0123-4567-0123456789ab)
  # FIXME This works with no more than 2 secondaries
  re_nodelist = re.compile(node_elem + "(?:," + node_elem + ")?$")

  info = qa_utils.GetObjectInfo(["gnt-instance", "info", instance])[0]
  nodes = []
  for nodeinfo in info["Nodes"]:
    if "primary" in nodeinfo:
      nodes.append(nodeinfo["primary"])
    elif "secondaries" in nodeinfo:
      nodestr = nodeinfo["secondaries"]
      if nodestr:
        m = re_nodelist.match(nodestr)
        if m:
          nodes.extend(filter(None, m.groups()))
        else:
          nodes.append(nodestr)

  disk_template = info["Disk template"]
  if not disk_template:
    raise qa_error.Error("Can't get instance disk template")
  storage_type = constants.DISK_TEMPLATES_STORAGE_TYPE[disk_template]

  re_drbdnode = re.compile(r"^([^\s,]+),\s+minor=([0-9]+)$")
  vols = []
  drbd_min = {}
  for (count, diskinfo) in enumerate(info["Disks"]):
    (dtype, _) = diskinfo["disk/%s" % count].split(",", 1)
    if dtype == constants.LD_DRBD8:
      for child in diskinfo["child devices"]:
        vols.append(child["logical_id"])
      for key in ["nodeA", "nodeB"]:
        m = re_drbdnode.match(diskinfo[key])
        if not m:
          raise qa_error.Error("Cannot parse DRBD info: %s" % diskinfo[key])
        node = m.group(1)
        minor = int(m.group(2))
        minorlist = drbd_min.setdefault(node, [])
        minorlist.append(minor)
    elif dtype == constants.LD_LV:
      vols.append(diskinfo["logical_id"])

  assert nodes
  assert len(nodes) < 2 or vols
  return {
    "nodes": nodes,
    "volumes": vols,
    "drbd-minors": drbd_min,
    "disk-template": disk_template,
    "storage-type": storage_type,
    }
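
# A sketch of the structure returned above for a two-node DRBD instance; the
# names and numbers are hypothetical:
#   {"nodes": ["node1.fqdn", "node2.fqdn"],
#    "volumes": ["xenvg/uuid.disk0_data", "xenvg/uuid.disk0_meta"],
#    "drbd-minors": {"node1.fqdn": [0], "node2.fqdn": [0]},
#    "disk-template": "drbd",
#    "storage-type": "lvm-vg"}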


def _DestroyInstanceDisks(instance):
  """Remove all the backend disks of an instance.

  This is used to simulate HW errors (dead nodes, broken disks...); the
  configuration of the instance is not affected.

  @type instance: dictionary
  @param instance: the instance

  """
  info = _GetInstanceInfo(instance.name)
  # FIXME: destruction/removal should be part of the disk class
  if info["storage-type"] == constants.ST_LVM_VG:
    vols = info["volumes"]
    for node in info["nodes"]:
      AssertCommand(["lvremove", "-f"] + vols, node=node)
  elif info["storage-type"] == constants.ST_FILE:
    # FIXME: file storage dir not configurable in qa
    # Note that this works for both file and sharedfile, and this is intended.
    filestorage = pathutils.DEFAULT_FILE_STORAGE_DIR
    idir = os.path.join(filestorage, instance.name)
    for node in info["nodes"]:
      AssertCommand(["rm", "-rf", idir], node=node)
  elif info["storage-type"] == constants.ST_DISKLESS:
    pass
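
# For an LVM-backed instance the loop above amounts to running, on every
# instance node, something like (hypothetical volume names):
#   lvremove -f xenvg/uuid.disk0_data xenvg/uuid.disk0_meta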


def _GetInstanceField(instance, field):
  """Get the value of a field of an instance.

  @type instance: string
  @param instance: Instance name
  @type field: string
  @param field: Name of the field
  @rtype: string

  """
  master = qa_config.GetMasterNode()
  infocmd = utils.ShellQuoteArgs(["gnt-instance", "list", "--no-headers",
                                  "--units", "m", "-o", field, instance])
  return qa_utils.GetCommandOutput(master.primary, infocmd).strip()


def _GetBoolInstanceField(instance, field):
  """Get the Boolean value of a field of an instance.

  @type instance: string
  @param instance: Instance name
  @type field: string
  @param field: Name of the field
  @rtype: bool

  """
  info_out = _GetInstanceField(instance, field)
  if info_out == "Y":
    return True
  elif info_out == "N":
    return False
  else:
    raise qa_error.Error("Field %s of instance %s has a non-Boolean value:"
                         " %s" % (field, instance, info_out))
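
# The query built by _GetInstanceField is equivalent to running on the master
# node, e.g. (hypothetical instance name):
#   gnt-instance list --no-headers --units m -o be/maxmem instance1.example.com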


def _GetNumInstanceField(instance, field):
  """Get a numeric value of a field of an instance.

  @type instance: string
  @param instance: Instance name
  @type field: string
  @param field: Name of the field
  @rtype: int or float

  """
  info_out = _GetInstanceField(instance, field)
  try:
    ret = int(info_out)
  except ValueError:
    try:
      ret = float(info_out)
    except ValueError:
      raise qa_error.Error("Field %s of instance %s has a non-numeric value:"
                           " %s" % (field, instance, info_out))
  return ret


def GetInstanceSpec(instance, spec):
  """Return the current spec for the given parameter.

  @type instance: string
  @param instance: Instance name
  @type spec: string
  @param spec: one of the supported parameters: "memory-size", "cpu-count",
      "disk-count", "disk-size", "nic-count"
  @rtype: tuple
  @return: (minspec, maxspec); minspec and maxspec can be different only for
      memory and disk size

  """
  specmap = {
    "memory-size": ["be/minmem", "be/maxmem"],
    "cpu-count": ["vcpus"],
    "disk-count": ["disk.count"],
    "disk-size": ["disk.size/ "],
    "nic-count": ["nic.count"],
    }
  # For disks, first we need the number of disks
  if spec == "disk-size":
    (numdisk, _) = GetInstanceSpec(instance, "disk-count")
    fields = ["disk.size/%s" % k for k in range(0, numdisk)]
  else:
    assert spec in specmap, "%s not in %s" % (spec, specmap)
    fields = specmap[spec]
  values = [_GetNumInstanceField(instance, f) for f in fields]
  return (min(values), max(values))
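
# Usage sketch (hypothetical instance name; values depend on the instance):
#   (min_mem, max_mem) = GetInstanceSpec("instance1.example.com", "memory-size")
#   (num_disks, _) = GetInstanceSpec("instance1.example.com", "disk-count")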


def IsFailoverSupported(instance):
  return instance.disk_template in constants.DTS_MIRRORED


def IsMigrationSupported(instance):
  return instance.disk_template in constants.DTS_MIRRORED


def IsDiskReplacingSupported(instance):
  return instance.disk_template == constants.DT_DRBD8


def IsDiskSupported(instance):
  return instance.disk_template != constants.DT_DISKLESS


def TestInstanceAddWithPlainDisk(nodes, fail=False):
  """gnt-instance add -t plain"""
  if constants.DT_PLAIN in qa_config.GetEnabledDiskTemplates():
    instance = _CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_PLAIN,
                                                    fail=fail)
    if not fail:
      qa_utils.RunInstanceCheck(instance, True)
    return instance


@InstanceCheck(None, INST_UP, RETURN_VALUE)
def TestInstanceAddWithDrbdDisk(nodes):
  """gnt-instance add -t drbd"""
  if constants.DT_DRBD8 in qa_config.GetEnabledDiskTemplates():
    return _CreateInstanceDrbd8(nodes)


@InstanceCheck(None, INST_UP, RETURN_VALUE)
def TestInstanceAddFile(nodes):
  """gnt-instance add -t file"""
  assert len(nodes) == 1
  if constants.DT_FILE in qa_config.GetEnabledDiskTemplates():
    return _CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_FILE)


@InstanceCheck(None, INST_UP, RETURN_VALUE)
def TestInstanceAddDiskless(nodes):
  """gnt-instance add -t diskless"""
  assert len(nodes) == 1
  if constants.DT_DISKLESS in qa_config.GetEnabledDiskTemplates():
    return _CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_DISKLESS)


@InstanceCheck(None, INST_DOWN, FIRST_ARG)
def TestInstanceRemove(instance):
  """gnt-instance remove"""
  AssertCommand(["gnt-instance", "remove", "-f", instance.name])


@InstanceCheck(INST_DOWN, INST_UP, FIRST_ARG)
def TestInstanceStartup(instance):
  """gnt-instance startup"""
  AssertCommand(["gnt-instance", "startup", instance.name])


@InstanceCheck(INST_UP, INST_DOWN, FIRST_ARG)
def TestInstanceShutdown(instance):
  """gnt-instance shutdown"""
  AssertCommand(["gnt-instance", "shutdown", instance.name])


@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceReboot(instance):
  """gnt-instance reboot"""
  options = qa_config.get("options", {})
  reboot_types = options.get("reboot-types", constants.REBOOT_TYPES)
  name = instance.name
  for rtype in reboot_types:
    AssertCommand(["gnt-instance", "reboot", "--type=%s" % rtype, name])

  AssertCommand(["gnt-instance", "shutdown", name])
  qa_utils.RunInstanceCheck(instance, False)
  AssertCommand(["gnt-instance", "reboot", name])

  master = qa_config.GetMasterNode()
  cmd = ["gnt-instance", "list", "--no-headers", "-o", "status", name]
  result_output = qa_utils.GetCommandOutput(master.primary,
                                            utils.ShellQuoteArgs(cmd))
  AssertEqual(result_output.strip(), constants.INSTST_RUNNING)
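
# Unless overridden by the "reboot-types" QA option, the loop above exercises
# every entry in constants.REBOOT_TYPES; in stock Ganeti these are the "soft",
# "hard" and "full" modes of "gnt-instance reboot --type=...".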


@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceReinstall(instance):
  """gnt-instance reinstall"""
  if instance.disk_template == constants.DT_DISKLESS:
    print qa_utils.FormatInfo("Test not supported for diskless instances")
    return

  AssertCommand(["gnt-instance", "reinstall", "-f", instance.name])

  # Test with non-existent OS definition
  AssertCommand(["gnt-instance", "reinstall", "-f",
                 "--os-type=NonExistantOsForQa",
                 instance.name],
                fail=True)


def _ReadSsconfInstanceList():
  """Reads ssconf_instance_list from the master node.

  """
  master = qa_config.GetMasterNode()

  ssconf_path = utils.PathJoin(pathutils.DATA_DIR,
                               "ssconf_%s" % constants.SS_INSTANCE_LIST)

  cmd = ["cat", qa_utils.MakeNodePath(master, ssconf_path)]

  return qa_utils.GetCommandOutput(master.primary,
                                   utils.ShellQuoteArgs(cmd)).splitlines()


def _CheckSsconfInstanceList(instance):
  """Checks if a certain instance is in the ssconf instance list.

  @type instance: string
  @param instance: Instance name

  """
  AssertIn(qa_utils.ResolveInstanceName(instance),
           _ReadSsconfInstanceList())
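
# The file read above is typically /var/lib/ganeti/ssconf_instance_list (the
# exact directory comes from pathutils.DATA_DIR), containing one instance
# name per line.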


@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceRenameAndBack(rename_source, rename_target):
  """gnt-instance rename

  This must leave the instance with the original name, not the target
  name.

  """
  _CheckSsconfInstanceList(rename_source)

  # first do a rename to a different actual name, expecting it to fail
  qa_utils.AddToEtcHosts(["meeeeh-not-exists", rename_target])
  try:
    AssertCommand(["gnt-instance", "rename", rename_source, rename_target],
                  fail=True)
    _CheckSsconfInstanceList(rename_source)
  finally:
    qa_utils.RemoveFromEtcHosts(["meeeeh-not-exists", rename_target])

  info = _GetInstanceInfo(rename_source)

  # Check instance volume tags correctly updated. Note that this check is lvm
  # specific, so we skip it for non-lvm-based instances.
  # FIXME: This will need updating when instances will be able to have
  # different disks living on storage pools with heterogeneous storage types.
  # FIXME: This check should be put inside the disk/storage class themselves,
  # rather than explicitly called here.
  if info["storage-type"] == constants.ST_LVM_VG:
    # In the lvm world we can check for tags on the logical volume
    tags_cmd = ("lvs -o tags --noheadings %s | grep " %
                (" ".join(info["volumes"]), ))
  else:
    # Other storage types don't have tags, so we use an always failing command,
    # to make sure it never gets executed
    tags_cmd = "false"

  # and now rename instance to rename_target...
  AssertCommand(["gnt-instance", "rename", rename_source, rename_target])
  _CheckSsconfInstanceList(rename_target)
  qa_utils.RunInstanceCheck(rename_source, False)
  qa_utils.RunInstanceCheck(rename_target, False)

  # NOTE: tags might not be exactly the same as the instance name, due to
  # charset restrictions; hence the test might be flaky
  if (rename_source != rename_target and
      info["storage-type"] == constants.ST_LVM_VG):
    for node in info["nodes"]:
      AssertCommand(tags_cmd + rename_source, node=node, fail=True)
      AssertCommand(tags_cmd + rename_target, node=node, fail=False)

  # and back
  AssertCommand(["gnt-instance", "rename", rename_target, rename_source])
  _CheckSsconfInstanceList(rename_source)
  qa_utils.RunInstanceCheck(rename_target, False)

  if (rename_source != rename_target and
      info["storage-type"] == constants.ST_LVM_VG):
    for node in info["nodes"]:
      AssertCommand(tags_cmd + rename_source, node=node, fail=False)
      AssertCommand(tags_cmd + rename_target, node=node, fail=True)


@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceFailover(instance):
  """gnt-instance failover"""
  if not IsFailoverSupported(instance):
    print qa_utils.FormatInfo("Instance doesn't support failover, skipping"
                              " test")
    return

  cmd = ["gnt-instance", "failover", "--force", instance.name]

  # failover ...
  AssertCommand(cmd)
  qa_utils.RunInstanceCheck(instance, True)

  # ... and back
  AssertCommand(cmd)


@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceMigrate(instance, toggle_always_failover=True):
  """gnt-instance migrate"""
  if not IsMigrationSupported(instance):
    print qa_utils.FormatInfo("Instance doesn't support migration, skipping"
                              " test")
    return

  cmd = ["gnt-instance", "migrate", "--force", instance.name]
  af_par = constants.BE_ALWAYS_FAILOVER
  af_field = "be/" + constants.BE_ALWAYS_FAILOVER
  af_init_val = _GetBoolInstanceField(instance.name, af_field)

  # migrate ...
  AssertCommand(cmd)
  # TODO: Verify the choice between failover and migration
  qa_utils.RunInstanceCheck(instance, True)

  # ... and back (possibly with always_failover toggled)
  if toggle_always_failover:
    AssertCommand(["gnt-instance", "modify", "-B",
                   ("%s=%s" % (af_par, not af_init_val)),
                   instance.name])
  AssertCommand(cmd)
  # TODO: Verify the choice between failover and migration
  qa_utils.RunInstanceCheck(instance, True)
  if toggle_always_failover:
    AssertCommand(["gnt-instance", "modify", "-B",
                   ("%s=%s" % (af_par, af_init_val)), instance.name])

  # TODO: Split into multiple tests
  AssertCommand(["gnt-instance", "shutdown", instance.name])
  qa_utils.RunInstanceCheck(instance, False)
  AssertCommand(cmd, fail=True)
  AssertCommand(["gnt-instance", "migrate", "--force", "--allow-failover",
                 instance.name])
  AssertCommand(["gnt-instance", "start", instance.name])
  AssertCommand(cmd)
  # @InstanceCheck enforces the check that the instance is running
  qa_utils.RunInstanceCheck(instance, True)

  AssertCommand(["gnt-instance", "modify", "-B",
                 ("%s=%s" %
                  (constants.BE_ALWAYS_FAILOVER, constants.VALUE_TRUE)),
                 instance.name])

  AssertCommand(cmd)
  qa_utils.RunInstanceCheck(instance, True)
  # TODO: Verify that a failover has been done instead of a migration

  # TODO: Verify whether the default value is restored here (not hardcoded)
  AssertCommand(["gnt-instance", "modify", "-B",
                 ("%s=%s" %
                  (constants.BE_ALWAYS_FAILOVER, constants.VALUE_FALSE)),
                 instance.name])

  AssertCommand(cmd)
  qa_utils.RunInstanceCheck(instance, True)


def TestInstanceInfo(instance):
  """gnt-instance info"""
  AssertCommand(["gnt-instance", "info", instance.name])


@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceModify(instance):
  """gnt-instance modify"""
  default_hv = qa_config.GetDefaultHypervisor()

  # Assume /sbin/init exists on all systems
  test_kernel = "/sbin/init"
  test_initrd = test_kernel

  orig_maxmem = qa_config.get(constants.BE_MAXMEM)
  orig_minmem = qa_config.get(constants.BE_MINMEM)
  #orig_bridge = qa_config.get("bridge", "xen-br0")

  args = [
    ["-B", "%s=128" % constants.BE_MINMEM],
    ["-B", "%s=128" % constants.BE_MAXMEM],
    ["-B", "%s=%s,%s=%s" % (constants.BE_MINMEM, orig_minmem,
                            constants.BE_MAXMEM, orig_maxmem)],
    ["-B", "%s=2" % constants.BE_VCPUS],
    ["-B", "%s=1" % constants.BE_VCPUS],
    ["-B", "%s=%s" % (constants.BE_VCPUS, constants.VALUE_DEFAULT)],
    ["-B", "%s=%s" % (constants.BE_ALWAYS_FAILOVER, constants.VALUE_TRUE)],
    ["-B", "%s=%s" % (constants.BE_ALWAYS_FAILOVER, constants.VALUE_DEFAULT)],

    ["-H", "%s=%s" % (constants.HV_KERNEL_PATH, test_kernel)],
    ["-H", "%s=%s" % (constants.HV_KERNEL_PATH, constants.VALUE_DEFAULT)],

    # TODO: bridge tests
    #["--bridge", "xen-br1"],
    #["--bridge", orig_bridge],
    ]

  if default_hv == constants.HT_XEN_PVM:
    args.extend([
      ["-H", "%s=%s" % (constants.HV_INITRD_PATH, test_initrd)],
      ["-H", "no_%s" % (constants.HV_INITRD_PATH, )],
      ["-H", "%s=%s" % (constants.HV_INITRD_PATH, constants.VALUE_DEFAULT)],
      ])
  elif default_hv == constants.HT_XEN_HVM:
    args.extend([
      ["-H", "%s=acn" % constants.HV_BOOT_ORDER],
      ["-H", "%s=%s" % (constants.HV_BOOT_ORDER, constants.VALUE_DEFAULT)],
      ])

  for alist in args:
    AssertCommand(["gnt-instance", "modify"] + alist + [instance.name])

  # check no-modify
  AssertCommand(["gnt-instance", "modify", instance.name], fail=True)

  # Marking offline while instance is running must fail...
  AssertCommand(["gnt-instance", "modify", "--offline", instance.name],
                fail=True)

  # ...while making it online is ok, and should work
  AssertCommand(["gnt-instance", "modify", "--online", instance.name])


@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceModifyPrimaryAndBack(instance, currentnode, othernode):
  """gnt-instance modify --new-primary

  This will leave the instance on its original primary node, not the other
  node.

  """
  if instance.disk_template != constants.DT_FILE:
    print qa_utils.FormatInfo("Test only supported for the file disk template")
    return

  cluster_name = qa_config.get("name")

  name = instance.name
  current = currentnode.primary
  other = othernode.primary

  # FIXME: the qa doesn't have a customizable file storage dir parameter. As
  # such for now we use the default.
  filestorage = pathutils.DEFAULT_FILE_STORAGE_DIR
  disk = os.path.join(filestorage, name)

  AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % other, name],
                fail=True)
  AssertCommand(["gnt-instance", "shutdown", name])
  AssertCommand(["scp", "-oGlobalKnownHostsFile=%s" %
                 pathutils.SSH_KNOWN_HOSTS_FILE,
                 "-oCheckHostIp=no", "-oStrictHostKeyChecking=yes",
                 "-oHashKnownHosts=no", "-oHostKeyAlias=%s" % cluster_name,
                 "-r", disk, "%s:%s" % (other, filestorage)], node=current)
  AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % other, name])
  AssertCommand(["gnt-instance", "startup", name])

  # and back
  AssertCommand(["gnt-instance", "shutdown", name])
  AssertCommand(["rm", "-rf", disk], node=other)
  AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % current, name])
  AssertCommand(["gnt-instance", "startup", name])


@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceStoppedModify(instance):
  """gnt-instance modify (stopped instance)"""
  name = instance.name

  # Instance was not marked offline; try marking it online once more
  AssertCommand(["gnt-instance", "modify", "--online", name])

  # Mark instance as offline
  AssertCommand(["gnt-instance", "modify", "--offline", name])

  # When the instance is offline shutdown should only work with --force,
  # while start should never work
  AssertCommand(["gnt-instance", "shutdown", name], fail=True)
  AssertCommand(["gnt-instance", "shutdown", "--force", name])
  AssertCommand(["gnt-instance", "start", name], fail=True)
  AssertCommand(["gnt-instance", "start", "--force", name], fail=True)

  # Also do offline to offline
  AssertCommand(["gnt-instance", "modify", "--offline", name])

  # And online again
  AssertCommand(["gnt-instance", "modify", "--online", name])


@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceConvertDiskToPlain(instance, inodes):
  """gnt-instance modify -t"""
  name = instance.name

  template = instance.disk_template
  if template != constants.DT_DRBD8:
    print qa_utils.FormatInfo("Unsupported template %s, skipping conversion"
                              " test" % template)
    return

  assert len(inodes) == 2
  AssertCommand(["gnt-instance", "modify", "-t", constants.DT_PLAIN, name])
  AssertCommand(["gnt-instance", "modify", "-t", constants.DT_DRBD8,
                 "-n", inodes[1].primary, name])


@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceModifyDisks(instance):
  """gnt-instance modify --disk"""
  if not IsDiskSupported(instance):
    print qa_utils.FormatInfo("Instance doesn't support disks, skipping test")
    return

  size = qa_config.GetDiskOptions()[-1].get("size")
  name = instance.name
  build_cmd = lambda arg: ["gnt-instance", "modify", "--disk", arg, name]
  AssertCommand(build_cmd("add:size=%s" % size))
  AssertCommand(build_cmd("remove"))


@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceGrowDisk(instance):
  """gnt-instance grow-disk"""
  if qa_config.GetExclusiveStorage():
    print qa_utils.FormatInfo("Test not supported with exclusive_storage")
    return

  if instance.disk_template == constants.DT_DISKLESS:
    print qa_utils.FormatInfo("Test not supported for diskless instances")
    return

  name = instance.name
  disks = qa_config.GetDiskOptions()
  all_size = [d.get("size") for d in disks]
  all_grow = [d.get("growth") for d in disks]

  if not all_grow:
    # missing disk sizes but instance grow disk has been enabled,
    # let's set fixed/nominal growth
    all_grow = ["128M" for _ in all_size]

  for idx, (size, grow) in enumerate(zip(all_size, all_grow)):
    # succeed in grow by amount
    AssertCommand(["gnt-instance", "grow-disk", name, str(idx), grow])
    # fail in grow to the old size
    AssertCommand(["gnt-instance", "grow-disk", "--absolute", name, str(idx),
                   size], fail=True)
    # succeed to grow to old size + 2 * growth
    int_size = utils.ParseUnit(size)
    int_grow = utils.ParseUnit(grow)
    AssertCommand(["gnt-instance", "grow-disk", "--absolute", name, str(idx),
                   str(int_size + 2 * int_grow)])
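
# A worked example with hypothetical numbers: for a 1G disk with 128M growth,
# utils.ParseUnit yields 1024 and 128 (MiB), so the absolute grow target above
# is 1024 + 2 * 128 = 1280 MiB.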


@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceDeviceNames(instance):
  if instance.disk_template == constants.DT_DISKLESS:
    print qa_utils.FormatInfo("Test not supported for diskless instances")
    return

  name = instance.name
  for dev_type in ["disk", "net"]:
    if dev_type == "disk":
      options = ",size=512M"
    else:
      options = ""
    # succeed in adding a device named 'test_device'
    AssertCommand(["gnt-instance", "modify",
                   "--%s=-1:add,name=test_device%s" % (dev_type, options),
                   name])
    # succeed in removing the 'test_device'
    AssertCommand(["gnt-instance", "modify",
                   "--%s=test_device:remove" % dev_type,
                   name])
    # fail to add two devices with the same name
    AssertCommand(["gnt-instance", "modify",
                   "--%s=-1:add,name=test_device%s" % (dev_type, options),
                   "--%s=-1:add,name=test_device%s" % (dev_type, options),
                   name], fail=True)
    # fail to add a device with an invalid name
    AssertCommand(["gnt-instance", "modify",
                   "--%s=-1:add,name=2%s" % (dev_type, options),
                   name], fail=True)
  # Rename disks
  disks = qa_config.GetDiskOptions()
  disk_names = [d.get("name") for d in disks]
  for idx, disk_name in enumerate(disk_names):
    # Refer to the disk by idx
    AssertCommand(["gnt-instance", "modify",
                   "--disk=%s:modify,name=renamed" % idx,
                   name])
    # Refer to the disk by name and rename it back to its original name
    AssertCommand(["gnt-instance", "modify",
                   "--disk=renamed:modify,name=%s" % disk_name,
                   name])
  if len(disks) >= 2:
    # fail in renaming two disks to the same name
    AssertCommand(["gnt-instance", "modify",
                   "--disk=0:modify,name=same_name",
                   "--disk=1:modify,name=same_name",
                   name], fail=True)


def TestInstanceList():
  """gnt-instance list"""
  qa_utils.GenericQueryTest("gnt-instance", query.INSTANCE_FIELDS.keys())


def TestInstanceListFields():
  """gnt-instance list-fields"""
  qa_utils.GenericQueryFieldsTest("gnt-instance", query.INSTANCE_FIELDS.keys())


@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceConsole(instance):
  """gnt-instance console"""
  AssertCommand(["gnt-instance", "console", "--show-cmd", instance.name])


@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestReplaceDisks(instance, curr_nodes, other_nodes):
  """gnt-instance replace-disks"""
  def buildcmd(args):
    cmd = ["gnt-instance", "replace-disks"]
    cmd.extend(args)
    cmd.append(instance.name)
    return cmd

  if not IsDiskReplacingSupported(instance):
    print qa_utils.FormatInfo("Instance doesn't support disk replacing,"
                              " skipping test")
    return

  # Currently all supported templates have one primary and one secondary node
  assert len(curr_nodes) == 2
  snode = curr_nodes[1]
  assert len(other_nodes) == 1
  othernode = other_nodes[0]

  options = qa_config.get("options", {})
  use_ialloc = options.get("use-iallocators", True)
  for data in [
    ["-p"],
    ["-s"],
    # A placeholder; the actual command choice depends on use_ialloc
    None,
    # Restore the original secondary
    ["--new-secondary=%s" % snode.primary],
    ]:
    if data is None:
      if use_ialloc:
        data = ["-I", constants.DEFAULT_IALLOCATOR_SHORTCUT]
      else:
        data = ["--new-secondary=%s" % othernode.primary]
    AssertCommand(buildcmd(data))

  AssertCommand(buildcmd(["-a"]))
  AssertCommand(["gnt-instance", "stop", instance.name])
  AssertCommand(buildcmd(["-a"]), fail=True)
  AssertCommand(["gnt-instance", "activate-disks", instance.name])
  AssertCommand(["gnt-instance", "activate-disks", "--wait-for-sync",
                 instance.name])
  AssertCommand(buildcmd(["-a"]))
  AssertCommand(["gnt-instance", "start", instance.name])
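
# The replace-disks modes exercised above, for reference: "-p" replaces disks
# on the primary node, "-s" on the secondary, "-I <allocator>" lets an
# iallocator pick a new secondary, "--new-secondary" names one explicitly,
# and "-a" replaces the faulty disks automatically.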


def _AssertRecreateDisks(cmdargs, instance, fail=False, check=True,
                         destroy=True):
  """Execute gnt-instance recreate-disks and check the result

  @param cmdargs: Arguments (instance name excluded)
  @param instance: Instance to operate on
  @param fail: True if the command is expected to fail
  @param check: If True and fail is False, check that the disks work
  @param destroy: If True, destroy the old disks first

  """
  if destroy:
    _DestroyInstanceDisks(instance)
  AssertCommand((["gnt-instance", "recreate-disks"] + cmdargs +
                 [instance.name]), fail)
  if not fail and check:
    # Quick check that the disks are there
    AssertCommand(["gnt-instance", "activate-disks", instance.name])
    AssertCommand(["gnt-instance", "activate-disks", "--wait-for-sync",
                   instance.name])
    AssertCommand(["gnt-instance", "deactivate-disks", instance.name])


@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestRecreateDisks(instance, inodes, othernodes):
  """gnt-instance recreate-disks

  @param instance: Instance to work on
  @param inodes: List of the current nodes of the instance
  @param othernodes: list/tuple of nodes where to temporarily recreate disks

  """
  options = qa_config.get("options", {})
  use_ialloc = options.get("use-iallocators", True)
  other_seq = ":".join([n.primary for n in othernodes])
  orig_seq = ":".join([n.primary for n in inodes])
  # These fail because the instance is running
  _AssertRecreateDisks(["-n", other_seq], instance, fail=True, destroy=False)
  if use_ialloc:
    _AssertRecreateDisks(["-I", "hail"], instance, fail=True, destroy=False)
  else:
    _AssertRecreateDisks(["-n", other_seq], instance, fail=True, destroy=False)
  AssertCommand(["gnt-instance", "stop", instance.name])
  # Disks exist: this should fail
  _AssertRecreateDisks([], instance, fail=True, destroy=False)
  # Recreate disks in place
  _AssertRecreateDisks([], instance)
  # Move disks away
  if use_ialloc:
    _AssertRecreateDisks(["-I", "hail"], instance)
    # Move disks somewhere else
    _AssertRecreateDisks(["-I", constants.DEFAULT_IALLOCATOR_SHORTCUT],
                         instance)
  else:
    _AssertRecreateDisks(["-n", other_seq], instance)
  # Move disks back
  _AssertRecreateDisks(["-n", orig_seq], instance)
  # Recreate the disks one by one
  for idx in range(0, len(qa_config.GetDiskOptions())):
    # Only the first call should destroy all the disks
    destroy = (idx == 0)
    _AssertRecreateDisks(["--disk=%s" % idx], instance, destroy=destroy,
                         check=False)
  # This and the InstanceCheck decoration check that the disks are working
  AssertCommand(["gnt-instance", "reinstall", "-f", instance.name])
  AssertCommand(["gnt-instance", "start", instance.name])


@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceExport(instance, node):
  """gnt-backup export -n ..."""
  name = instance.name
  AssertCommand(["gnt-backup", "export", "-n", node.primary, name])
  return qa_utils.ResolveInstanceName(name)


@InstanceCheck(None, INST_DOWN, FIRST_ARG)
def TestInstanceExportWithRemove(instance, node):
  """gnt-backup export --remove-instance"""
  AssertCommand(["gnt-backup", "export", "-n", node.primary,
                 "--remove-instance", instance.name])


@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceExportNoTarget(instance):
  """gnt-backup export (without target node, should fail)"""
  AssertCommand(["gnt-backup", "export", instance.name], fail=True)


@InstanceCheck(None, INST_DOWN, FIRST_ARG)
def TestInstanceImport(newinst, node, expnode, name):
  """gnt-backup import"""
  templ = constants.DT_PLAIN
  cmd = (["gnt-backup", "import",
          "--disk-template=%s" % templ,
          "--no-ip-check",
          "--src-node=%s" % expnode.primary,
          "--src-dir=%s/%s" % (pathutils.EXPORT_DIR, name),
          "--node=%s" % node.primary] +
         _GetGenericAddParameters(newinst, templ,
                                  force_mac=constants.VALUE_GENERATE))
  cmd.append(newinst.name)
  AssertCommand(cmd)
  newinst.SetDiskTemplate(templ)


def TestBackupList(expnode):
  """gnt-backup list"""
  AssertCommand(["gnt-backup", "list", "--node=%s" % expnode.primary])

  qa_utils.GenericQueryTest("gnt-backup", query.EXPORT_FIELDS.keys(),
                            namefield=None, test_unknown=False)


def TestBackupListFields():
  """gnt-backup list-fields"""
  qa_utils.GenericQueryFieldsTest("gnt-backup", query.EXPORT_FIELDS.keys())


def TestRemoveInstanceOfflineNode(instance, snode, set_offline, set_online):
  """gnt-instance remove with an off-line node

  @param instance: instance
  @param snode: secondary node, to be set offline
  @param set_offline: function to call to set the node off-line
  @param set_online: function to call to set the node on-line

  """
  info = _GetInstanceInfo(instance.name)
  set_offline(snode)
  try:
    TestInstanceRemove(instance)
  finally:
    set_online(snode)

  # Clean up the disks on the offline node, if necessary
  if instance.disk_template not in constants.DTS_EXT_MIRROR:
    # FIXME: abstract the cleanup inside the disks
    if info["storage-type"] == constants.ST_LVM_VG:
      for minor in info["drbd-minors"][snode.primary]:
        AssertCommand(["drbdsetup", str(minor), "down"], node=snode)
      AssertCommand(["lvremove", "-f"] + info["volumes"], node=snode)
    elif info["storage-type"] == constants.ST_FILE:
      filestorage = pathutils.DEFAULT_FILE_STORAGE_DIR
      disk = os.path.join(filestorage, instance.name)
      AssertCommand(["rm", "-rf", disk], node=snode)


def TestInstanceCreationRestrictedByDiskTemplates():
  """Test adding instances for disabled disk templates."""
  enabled_disk_templates = qa_config.GetEnabledDiskTemplates()
  nodes = qa_config.AcquireManyNodes(2)

  # Setup the cluster with the enabled_disk_templates
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-template=%s" %
       ",".join(enabled_disk_templates)],
    fail=False)

  # Test instance creation for enabled disk templates
  for disk_template in enabled_disk_templates:
    instance = CreateInstanceByDiskTemplate(nodes, disk_template, fail=False)
    TestInstanceRemove(instance)
    instance.Release()

  # Test that instance creation fails for disabled disk templates
  disabled_disk_templates = list(constants.DISK_TEMPLATES
                                 - set(enabled_disk_templates))
  for disk_template in disabled_disk_templates:
    instance = CreateInstanceByDiskTemplate(nodes, disk_template, fail=True)

  # Test instance creation after disabling enabled disk templates
  if (len(enabled_disk_templates) > 1):
    # Partition the disk templates, enable them separately and check if the
    # disabled ones cannot be used by instances.
    middle = len(enabled_disk_templates) / 2
    templates1 = enabled_disk_templates[:middle]
    templates2 = enabled_disk_templates[middle:]

    for (enabled, disabled) in [(templates1, templates2),
                                (templates2, templates1)]:
      AssertCommand(["gnt-cluster", "modify",
                     "--enabled-disk-template=%s" %
                       ",".join(enabled)],
                    fail=False)
      for disk_template in disabled:
        CreateInstanceByDiskTemplate(nodes, disk_template, fail=True)
  elif (len(enabled_disk_templates) == 1):
    # If only one disk template is enabled in the QA config, we have to enable
    # some of the disabled disk templates in order to test whether disabling
    # the only enabled disk template prohibits creating instances of that
    # template.
    AssertCommand(["gnt-cluster", "modify",
                   "--enabled-disk-template=%s" %
                     ",".join(disabled_disk_templates)],
                  fail=False)
    CreateInstanceByDiskTemplate(nodes, enabled_disk_templates[0], fail=True)
  else:
    raise qa_error.Error("Please enable at least one disk template"
                         " in your QA setup.")

  # Restore the initially enabled disk templates
  AssertCommand(["gnt-cluster", "modify",
                 "--enabled-disk-template=%s" %
                   ",".join(enabled_disk_templates)],
                fail=False)