root / test / py / cmdlib / instance_unittest.py @ 1c4910f7
History | View | Annotate | Download (85.8 kB)
1 |
#!/usr/bin/python
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2008, 2011, 2012, 2013 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
|
22 |
"""Tests for LUInstance*
|
23 |
|
24 |
"""
|
25 |
|
26 |
import copy |
27 |
import itertools |
28 |
import re |
29 |
import unittest |
30 |
import mock |
31 |
import operator |
32 |
|
33 |
from ganeti import compat |
34 |
from ganeti import constants |
35 |
from ganeti import errors |
36 |
from ganeti import ht |
37 |
from ganeti import opcodes |
38 |
from ganeti import objects |
39 |
from ganeti.rpc import node as rpc |
40 |
from ganeti import utils |
41 |
from ganeti.cmdlib import instance |
42 |
from ganeti.cmdlib import instance_utils |
43 |
|
44 |
from cmdlib.cmdlib_unittest import _StubComputeIPolicySpecViolation, _FakeLU |
45 |
|
46 |
from testsupport import * |
47 |
|
48 |
import testutils |
49 |
|
50 |
|
51 |
class TestComputeIPolicyInstanceSpecViolation(unittest.TestCase):
  """A spec that matches the stub's expected values must yield no violations."""

  def test(self):
    # Values the stub compute function will assert it receives.
    mem, cpus, disk_count, nic_count = 2048, 2, 1, 0
    disk_sizes, spindle_use = [512], 1
    spec = {
      constants.ISPEC_MEM_SIZE: mem,
      constants.ISPEC_CPU_COUNT: cpus,
      constants.ISPEC_DISK_COUNT: disk_count,
      constants.ISPEC_DISK_SIZE: disk_sizes,
      constants.ISPEC_NIC_COUNT: nic_count,
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }
    checker = _StubComputeIPolicySpecViolation(mem, cpus, disk_count,
                                               nic_count, disk_sizes,
                                               spindle_use, constants.DT_PLAIN)
    violations = instance._ComputeIPolicyInstanceSpecViolation(
      NotImplemented, spec, constants.DT_PLAIN, _compute_fn=checker)
    self.assertEqual(violations, [])
67 |
|
68 |
|
69 |
class TestLUInstanceCreate(CmdlibTestCase):
  """Tests for LUInstanceCreate (opcode OP_INSTANCE_CREATE)."""

  def setUp(self):
    """Wire up the mocked cluster and prepare per-template opcode samples.

    Two extra nodes are added next to the master, RPC calls are stubbed to
    report plenty of free storage/memory and a clean mirror status, and one
    OpInstanceCreate template is built per disk template (diskless, plain,
    block, drbd, file) for the individual tests to copy and tweak.
    """
    super(TestLUInstanceCreate, self).setUp()

    # A network connected to the default node group, used by the NIC tests.
    self.net = self.cfg.AddNewNetwork()
    self.cfg.ConnectNetworkToGroup(self.net, self.group)

    self.node1 = self.cfg.AddNewNode()
    self.node2 = self.cfg.AddNewNode()

    # Every node reports the mocked OS as available.
    self.rpc.call_os_get.side_effect = \
      lambda node, _: self.RpcResultsBuilder() \
                        .CreateSuccessfulNodeResult(node, self.os)

    # Node info: enough free LVM storage and memory for any test instance.
    hv_info = ("bootid",
               [{
                 "type": constants.ST_LVM_VG,
                 "storage_free": 10000
               }],
               ({"memory_free": 10000}, ))
    self.rpc.call_node_info.return_value = \
      self.RpcResultsBuilder() \
        .AddSuccessfulNode(self.master, hv_info) \
        .AddSuccessfulNode(self.node1, hv_info) \
        .AddSuccessfulNode(self.node2, hv_info) \
        .Build()

    # Mirrored disks always report an empty (i.e. clean) sync status.
    self.rpc.call_blockdev_getmirrorstatus.side_effect = \
      lambda node, _: self.RpcResultsBuilder() \
                        .CreateSuccessfulNodeResult(node, [])

    # The mocked iallocator always places instances on node1/node2.
    self.iallocator_cls.return_value.result = [self.node1.name, self.node2.name]

    self.diskless_op = opcodes.OpInstanceCreate(
      instance_name="diskless.test.com",
      pnode=self.master.name,
      disk_template=constants.DT_DISKLESS,
      mode=constants.INSTANCE_CREATE,
      nics=[{}],
      disks=[],
      os_type=self.os_name_variant)

    self.plain_op = opcodes.OpInstanceCreate(
      instance_name="plain.test.com",
      pnode=self.master.name,
      disk_template=constants.DT_PLAIN,
      mode=constants.INSTANCE_CREATE,
      nics=[{}],
      disks=[{
        constants.IDISK_SIZE: 1024
      }],
      os_type=self.os_name_variant)

    self.block_op = opcodes.OpInstanceCreate(
      instance_name="block.test.com",
      pnode=self.master.name,
      disk_template=constants.DT_BLOCK,
      mode=constants.INSTANCE_CREATE,
      nics=[{}],
      disks=[{
        constants.IDISK_SIZE: 1024,
        constants.IDISK_ADOPT: "/dev/disk/block0"
      }],
      os_type=self.os_name_variant)

    self.drbd_op = opcodes.OpInstanceCreate(
      instance_name="drbd.test.com",
      pnode=self.node1.name,
      snode=self.node2.name,
      disk_template=constants.DT_DRBD8,
      mode=constants.INSTANCE_CREATE,
      nics=[{}],
      disks=[{
        constants.IDISK_SIZE: 1024
      }],
      os_type=self.os_name_variant)

    self.file_op = opcodes.OpInstanceCreate(
      instance_name="file.test.com",
      pnode=self.node1.name,
      disk_template=constants.DT_FILE,
      mode=constants.INSTANCE_CREATE,
      nics=[{}],
      disks=[{
        constants.IDISK_SIZE: 1024
      }],
      os_type=self.os_name_variant)
156 |
|
157 |
  # The happy path: a plain diskless create must succeed.
  def testSimpleCreate(self):
    op = self.CopyOpCode(self.diskless_op)
    self.ExecOpCode(op)

  # A hostname resolving to something unrelated must be rejected.
  def testStrangeHostnameResolve(self):
    op = self.CopyOpCode(self.diskless_op)
    self.netutils_mod.GetHostname.return_value = \
      HostnameMock("random.host.example.com", "203.0.113.1")
    self.ExecOpCodeExpectOpPrereqError(
      op, "Resolved hostname .* does not look the same as given hostname")

  # Opportunistic locking only makes sense together with an iallocator.
  def testOpportunisticLockingNoIAllocator(self):
    op = self.CopyOpCode(self.diskless_op,
                         opportunistic_locking=True,
                         iallocator=None)
    self.ExecOpCodeExpectOpPrereqError(
      op, "Opportunistic locking is only available in combination with an"
          " instance allocator")

  # A NIC may specify either a network or mode/link, not both.
  def testNicWithNetAndMode(self):
    op = self.CopyOpCode(self.diskless_op,
                         nics=[{
                           constants.INIC_NETWORK: self.net.name,
                           constants.INIC_MODE: constants.NIC_MODE_BRIDGED
                         }])
    self.ExecOpCodeExpectOpPrereqError(
      op, "If network is given, no mode or link is allowed to be passed")

  # ip=auto requires name checks, since the IP comes from name resolution.
  def testAutoIpNoNameCheck(self):
    op = self.CopyOpCode(self.diskless_op,
                         nics=[{
                           constants.INIC_IP: constants.VALUE_AUTO
                         }],
                         ip_check=False,
                         name_check=False)
    self.ExecOpCodeExpectOpPrereqError(
      op, "IP address set to auto but name checks have been skipped")

  # ip=auto with default checks enabled must succeed.
  def testAutoIp(self):
    op = self.CopyOpCode(self.diskless_op,
                         nics=[{
                           constants.INIC_IP: constants.VALUE_AUTO
                         }])
    self.ExecOpCode(op)

  # ip=pool needs a network to draw the address from.
  def testPoolIpNoNetwork(self):
    op = self.CopyOpCode(self.diskless_op,
                         nics=[{
                           constants.INIC_IP: constants.NIC_IP_POOL
                         }])
    self.ExecOpCodeExpectOpPrereqError(
      op, "if ip=pool, parameter network must be passed too")

  # A literal, well-formed IP address is accepted.
  def testValidIp(self):
    op = self.CopyOpCode(self.diskless_op,
                         nics=[{
                           constants.INIC_IP: "203.0.113.1"
                         }])
    self.ExecOpCode(op)

  # Routed NICs are only usable when an IP address is given.
  def testRoutedNoIp(self):
    op = self.CopyOpCode(self.diskless_op,
                         nics=[{
                           constants.INIC_MODE: constants.NIC_MODE_ROUTED
                         }])
    self.ExecOpCodeExpectOpPrereqError(
      op, "Routed nic mode requires an ip address")
224 |
|
225 |
def testValicMac(self): |
226 |
op = self.CopyOpCode(self.diskless_op, |
227 |
nics=[{ |
228 |
constants.INIC_MAC: "f0:df:f4:a3:d1:cf"
|
229 |
}]) |
230 |
self.ExecOpCode(op)
|
231 |
|
232 |
  # Explicit bridged mode plus a link is a valid NIC configuration.
  def testValidNicParams(self):
    op = self.CopyOpCode(self.diskless_op,
                         nics=[{
                           constants.INIC_MODE: constants.NIC_MODE_BRIDGED,
                           constants.INIC_LINK: "br_mock"
                         }])
    self.ExecOpCode(op)

  # OpenVSwitch mode with a numeric VLAN is a valid NIC configuration.
  def testValidNicParamsOpenVSwitch(self):
    op = self.CopyOpCode(self.diskless_op,
                         nics=[{
                           constants.INIC_MODE: constants.NIC_MODE_OVS,
                           constants.INIC_VLAN: "1"
                         }])
    self.ExecOpCode(op)

  # An explicit "none" NIC name must be accepted.
  def testNicNoneName(self):
    op = self.CopyOpCode(self.diskless_op,
                         nics=[{
                           constants.INIC_NAME: constants.VALUE_NONE
                         }])
    self.ExecOpCode(op)

  # An IP inside a known network requires the NIC to be on that network.
  def testConflictingIP(self):
    op = self.CopyOpCode(self.diskless_op,
                         nics=[{
                           constants.INIC_IP: self.net.gateway[:-1] + "2"
                         }])
    self.ExecOpCodeExpectOpPrereqError(
      op, "The requested IP address .* belongs to network .*, but the target"
          " NIC does not.")

  # Various malformed VLAN strings must all be rejected.
  def testVLanFormat(self):
    for vlan in [".pinky", ":bunny", ":1:pinky", "bunny"]:
      self.ResetMocks()
      op = self.CopyOpCode(self.diskless_op,
                           nics=[{
                             constants.INIC_VLAN: vlan
                           }])
      self.ExecOpCodeExpectOpPrereqError(
        op, "Specified VLAN parameter is invalid")

  # ip=pool together with a connected network must succeed.
  def testPoolIp(self):
    op = self.CopyOpCode(self.diskless_op,
                         nics=[{
                           constants.INIC_IP: constants.NIC_IP_POOL,
                           constants.INIC_NETWORK: self.net.name
                         }])
    self.ExecOpCode(op)

  # ip=pool on a network not connected to the node group must fail.
  def testPoolIpUnconnectedNetwork(self):
    net = self.cfg.AddNewNetwork()
    op = self.CopyOpCode(self.diskless_op,
                         nics=[{
                           constants.INIC_IP: constants.NIC_IP_POOL,
                           constants.INIC_NETWORK: net.name
                         }])
    self.ExecOpCodeExpectOpPrereqError(
      op, "No netparams found for network .*.")

  # A fixed IP outside the requested network's range must be rejected.
  def testIpNotInNetwork(self):
    op = self.CopyOpCode(self.diskless_op,
                         nics=[{
                           constants.INIC_IP: "203.0.113.1",
                           constants.INIC_NETWORK: self.net.name
                         }])
    self.ExecOpCodeExpectOpPrereqError(
      op, "IP address .* already in use or does not belong to network .*")
300 |
|
301 |
  # Disk adoption is all-or-nothing across the disk list.
  def testMixAdoptAndNotAdopt(self):
    op = self.CopyOpCode(self.diskless_op,
                         disk_template=constants.DT_PLAIN,
                         disks=[{
                           constants.IDISK_ADOPT: "lv1"
                         }, {}])
    self.ExecOpCodeExpectOpPrereqError(
      op, "Either all disks are adopted or none is")

  # The blockdev template only works via adoption.
  def testMustAdoptWithoutAdopt(self):
    op = self.CopyOpCode(self.diskless_op,
                         disk_template=constants.DT_BLOCK,
                         disks=[{}])
    self.ExecOpCodeExpectOpPrereqError(
      op, "Disk template blockdev requires disk adoption, but no 'adopt'"
          " parameter given")

  # DRBD disks cannot be adopted.
  def testDontAdoptWithAdopt(self):
    op = self.CopyOpCode(self.diskless_op,
                         disk_template=constants.DT_DRBD8,
                         disks=[{
                           constants.IDISK_ADOPT: "lv1"
                         }])
    self.ExecOpCodeExpectOpPrereqError(
      op, "Disk adoption is not supported for the 'drbd' disk template")

  # Adoption pins disks to concrete nodes, so iallocator placement is out.
  def testAdoptWithIAllocator(self):
    op = self.CopyOpCode(self.diskless_op,
                         disk_template=constants.DT_PLAIN,
                         disks=[{
                           constants.IDISK_ADOPT: "lv1"
                         }],
                         iallocator="mock")
    self.ExecOpCodeExpectOpPrereqError(
      op, "Disk adoption not allowed with an iallocator script")

  # Adoption and import are mutually exclusive creation modes.
  def testAdoptWithImport(self):
    op = self.CopyOpCode(self.diskless_op,
                         disk_template=constants.DT_PLAIN,
                         disks=[{
                           constants.IDISK_ADOPT: "lv1"
                         }],
                         mode=constants.INSTANCE_IMPORT)
    self.ExecOpCodeExpectOpPrereqError(
      op, "Disk adoption not allowed for instance import")
346 |
|
347 |
  # ip_check without name_check is an invalid flag combination.
  def testArgumentCombinations(self):
    op = self.CopyOpCode(self.diskless_op,
                         # start flag will be flipped by no_install
                         no_install=True,
                         start=True,
                         # not an allowed combination
                         ip_check=True,
                         name_check=False)
    self.ExecOpCodeExpectOpPrereqError(
      op, "Cannot do IP address check without a name check")

  # Unknown file drivers are rejected by opcode parameter validation.
  def testInvalidFileDriver(self):
    op = self.CopyOpCode(self.diskless_op,
                         file_driver="invalid_file_driver")
    self.ExecOpCodeExpectOpPrereqError(
      op, "Parameter 'OP_INSTANCE_CREATE.file_driver' fails validation")

  # Mirrored templates (DRBD) need an explicit secondary node.
  def testMissingSecondaryNode(self):
    op = self.CopyOpCode(self.diskless_op,
                         pnode=self.master.name,
                         disk_template=constants.DT_DRBD8)
    self.ExecOpCodeExpectOpPrereqError(
      op, "The networked disk templates need a mirror node")

  # A secondary node on a non-mirrored template is ignored with a warning.
  def testIgnoredSecondaryNode(self):
    op = self.CopyOpCode(self.diskless_op,
                         pnode=self.master.name,
                         snode=self.node1.name,
                         disk_template=constants.DT_PLAIN)
    try:
      self.ExecOpCode(op)
    except Exception:
      # Only the log message matters here, not whether the op succeeds.
      pass
    self.mcpu.assertLogContainsRegex(
      "Secondary node will be ignored on non-mirrored disk template")

  # An OS (or OS image) must always be specified.
  def testMissingOsType(self):
    op = self.CopyOpCode(self.diskless_op,
                         os_type=self.REMOVE)
    self.ExecOpCodeExpectOpPrereqError(op, "No guest OS or OS image specified")

  # A blacklisted OS cannot be installed.
  def testBlacklistedOs(self):
    self.cluster.blacklisted_os = [self.os_name_variant]
    op = self.CopyOpCode(self.diskless_op)
    self.ExecOpCodeExpectOpPrereqError(
      op, "Guest OS .* is not allowed for installation")
393 |
|
394 |
  # With a single enabled template, omitting disk_template falls back to it.
  def testMissingDiskTemplate(self):
    self.cluster.enabled_disk_templates = [constants.DT_DISKLESS]
    op = self.CopyOpCode(self.diskless_op,
                         disk_template=self.REMOVE)
    self.ExecOpCode(op)

  # Creating an instance under an already-used name must fail.
  def testExistingInstance(self):
    inst = self.cfg.AddNewInstance()
    op = self.CopyOpCode(self.diskless_op,
                         instance_name=inst.name)
    self.ExecOpCodeExpectOpPrereqError(
      op, "Instance .* is already in the cluster")

  # Plain (LVM) template, explicit primary node.
  def testPlainInstance(self):
    op = self.CopyOpCode(self.plain_op)
    self.ExecOpCode(op)

  # Plain template with node choice delegated to the iallocator.
  def testPlainIAllocator(self):
    op = self.CopyOpCode(self.plain_op,
                         pnode=self.REMOVE,
                         iallocator="mock")
    self.ExecOpCode(op)

  # Opportunistic locking together with an iallocator is allowed.
  def testIAllocatorOpportunisticLocking(self):
    op = self.CopyOpCode(self.plain_op,
                         pnode=self.REMOVE,
                         iallocator="mock",
                         opportunistic_locking=True)
    self.ExecOpCode(op)

  # An unsuccessful iallocator run aborts the create.
  def testFailingIAllocator(self):
    self.iallocator_cls.return_value.success = False
    op = self.CopyOpCode(self.plain_op,
                         pnode=self.REMOVE,
                         iallocator="mock")
    self.ExecOpCodeExpectOpPrereqError(
      op, "Can't compute nodes using iallocator")

  # DRBD template with explicit primary/secondary nodes.
  def testDrbdInstance(self):
    op = self.CopyOpCode(self.drbd_op)
    self.ExecOpCode(op)

  # DRBD template with both nodes chosen by the iallocator.
  def testDrbdIAllocator(self):
    op = self.CopyOpCode(self.drbd_op,
                         pnode=self.REMOVE,
                         snode=self.REMOVE,
                         iallocator="mock")
    self.ExecOpCode(op)

  # File-backed template with the cluster default storage dir.
  def testFileInstance(self):
    op = self.CopyOpCode(self.file_op)
    self.ExecOpCode(op)

  # File template needs a configured cluster file storage dir.
  def testFileInstanceNoClusterStorage(self):
    self.cluster.file_storage_dir = None
    op = self.CopyOpCode(self.file_op)
    self.ExecOpCodeExpectOpPrereqError(
      op, "Cluster file storage dir for 'file' storage type not defined")

  # An additional per-instance storage sub-directory is accepted.
  def testFileInstanceAdditionalPath(self):
    op = self.CopyOpCode(self.file_op,
                         file_storage_dir="mock_dir")
    self.ExecOpCode(op)
457 |
|
458 |
  # With identify_defaults=True, parameters matching the cluster defaults
  # must be stripped from the stored instance (empty hv/be/os param dicts).
  def testIdentifyDefaults(self):
    op = self.CopyOpCode(self.plain_op,
                         hvparams={
                           constants.HV_BOOT_ORDER: "cd"
                         },
                         beparams=constants.BEC_DEFAULTS.copy(),
                         nics=[{
                           constants.NIC_MODE: constants.NIC_MODE_BRIDGED
                         }],
                         osparams={
                           self.os_name_variant: {}
                         },
                         osparams_private={},
                         identify_defaults=True)
    self.ExecOpCode(op)

    # NOTE(review): ".values()[0]" only works on Python 2; Python 3 would
    # need "list(...)[0]".
    inst = self.cfg.GetAllInstancesInfo().values()[0]
    self.assertEqual(0, len(inst.hvparams))
    self.assertEqual(0, len(inst.beparams))
    assert self.os_name_variant not in inst.osparams or \
            len(inst.osparams[self.os_name_variant]) == 0
|
480 |
  # An offline node cannot be the primary node.
  def testOfflineNode(self):
    self.node1.offline = True
    op = self.CopyOpCode(self.diskless_op,
                         pnode=self.node1.name)
    self.ExecOpCodeExpectOpPrereqError(op, "Cannot use offline primary node")

  # A drained node cannot be the primary node.
  def testDrainedNode(self):
    self.node1.drained = True
    op = self.CopyOpCode(self.diskless_op,
                         pnode=self.node1.name)
    self.ExecOpCodeExpectOpPrereqError(op, "Cannot use drained primary node")

  # The primary node must be VM-capable.
  def testNonVmCapableNode(self):
    self.node1.vm_capable = False
    op = self.CopyOpCode(self.diskless_op,
                         pnode=self.node1.name)
    self.ExecOpCodeExpectOpPrereqError(
      op, "Cannot use non-vm_capable primary node")

  # The requested hypervisor must be enabled cluster-wide.
  def testNonEnabledHypervisor(self):
    self.cluster.enabled_hypervisors = [constants.HT_XEN_HVM]
    op = self.CopyOpCode(self.diskless_op,
                         hypervisor=constants.HT_FAKE)
    self.ExecOpCodeExpectOpPrereqError(
      op, "Selected hypervisor .* not enabled in the cluster")

  # Creating with an initial tag must succeed.
  def testAddTag(self):
    op = self.CopyOpCode(self.diskless_op,
                         tags=["tag"])
    self.ExecOpCode(op)

  # An over-long tag raises TagError.
  def testInvalidTag(self):
    op = self.CopyOpCode(self.diskless_op,
                         tags=["too_long" * 20])
    self.ExecOpCodeExpectException(op, errors.TagError, "Tag too long")

  # If the instance's IP already answers pings, the name is in use.
  def testPingableInstanceName(self):
    self.netutils_mod.TcpPing.return_value = True
    op = self.CopyOpCode(self.diskless_op)
    self.ExecOpCodeExpectOpPrereqError(
      op, "IP .* of instance diskless.test.com already in use")

  # Primary and secondary node must differ for DRBD.
  def testPrimaryIsSecondaryNode(self):
    op = self.CopyOpCode(self.drbd_op,
                         snode=self.drbd_op.pnode)
    self.ExecOpCodeExpectOpPrereqError(
      op, "The secondary node cannot be the primary node")

  # Cross-group DRBD pairs are allowed but warned about.
  def testPrimarySecondaryDifferentNodeGroups(self):
    group = self.cfg.AddNewNodeGroup()
    self.node2.group = group.uuid
    op = self.CopyOpCode(self.drbd_op)
    self.ExecOpCode(op)
    self.mcpu.assertLogContainsRegex(
      "The primary and secondary nodes are in two different node groups")

  # DRBD is incompatible with exclusive-storage nodes.
  def testExclusiveStorageUnsupportedDiskTemplate(self):
    self.node1.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    op = self.CopyOpCode(self.drbd_op)
    self.ExecOpCodeExpectOpPrereqError(
      op, "Disk template drbd not supported with exclusive storage")
541 |
|
542 |
  # Adopting an existing, inactive LV must succeed.
  def testAdoptPlain(self):
    self.rpc.call_lv_list.return_value = \
      self.RpcResultsBuilder() \
        .AddSuccessfulNode(self.master, {
          # (size, owner, online) tuple as returned by the lv_list RPC
          "xenvg/mock_disk_1": (10000, None, False)
        }) \
        .Build()
    op = self.CopyOpCode(self.plain_op)
    op.disks[0].update({constants.IDISK_ADOPT: "mock_disk_1"})
    self.ExecOpCode(op)

  # Adoption fails when the named LV does not exist on the node.
  def testAdoptPlainMissingLv(self):
    self.rpc.call_lv_list.return_value = \
      self.RpcResultsBuilder() \
        .AddSuccessfulNode(self.master, {}) \
        .Build()
    op = self.CopyOpCode(self.plain_op)
    op.disks[0].update({constants.IDISK_ADOPT: "mock_disk_1"})
    self.ExecOpCodeExpectOpPrereqError(op, "Missing logical volume")

  # An LV reported as online (in use) cannot be adopted.
  def testAdoptPlainOnlineLv(self):
    self.rpc.call_lv_list.return_value = \
      self.RpcResultsBuilder() \
        .AddSuccessfulNode(self.master, {
          "xenvg/mock_disk_1": (10000, None, True)
        }) \
        .Build()
    op = self.CopyOpCode(self.plain_op)
    op.disks[0].update({constants.IDISK_ADOPT: "mock_disk_1"})
    self.ExecOpCodeExpectOpPrereqError(
      op, "Online logical volumes found, cannot adopt")

  # Adopting an existing block device must succeed.
  def testAdoptBlock(self):
    self.rpc.call_bdev_sizes.return_value = \
      self.RpcResultsBuilder() \
        .AddSuccessfulNode(self.master, {
          "/dev/disk/block0": 10000
        }) \
        .Build()
    op = self.CopyOpCode(self.block_op)
    self.ExecOpCode(op)

  # The same device cannot be adopted twice in one opcode.
  def testAdoptBlockDuplicateNames(self):
    op = self.CopyOpCode(self.block_op,
                         disks=[{
                           constants.IDISK_SIZE: 0,
                           constants.IDISK_ADOPT: "/dev/disk/block0"
                         }, {
                           constants.IDISK_SIZE: 0,
                           constants.IDISK_ADOPT: "/dev/disk/block0"
                         }])
    self.ExecOpCodeExpectOpPrereqError(
      op, "Duplicate disk names given for adoption")

  # Only devices under the allowed device directory may be adopted.
  def testAdoptBlockInvalidNames(self):
    op = self.CopyOpCode(self.block_op,
                         disks=[{
                           constants.IDISK_SIZE: 0,
                           constants.IDISK_ADOPT: "/invalid/block0"
                         }])
    self.ExecOpCodeExpectOpPrereqError(
      op, "Device node.* lie outside .* and cannot be adopted")

  # Adoption fails when the node does not report the device at all.
  def testAdoptBlockMissingDisk(self):
    self.rpc.call_bdev_sizes.return_value = \
      self.RpcResultsBuilder() \
        .AddSuccessfulNode(self.master, {}) \
        .Build()
    op = self.CopyOpCode(self.block_op)
    self.ExecOpCodeExpectOpPrereqError(op, "Missing block device")

  # wait_for_sync=False must work for mirrored (DRBD) templates.
  def testNoWaitForSyncDrbd(self):
    op = self.CopyOpCode(self.drbd_op,
                         wait_for_sync=False)
    self.ExecOpCode(op)

  # ... and for non-mirrored (plain) templates as well.
  def testNoWaitForSyncPlain(self):
    op = self.CopyOpCode(self.plain_op,
                         wait_for_sync=False)
    self.ExecOpCode(op)
622 |
|
623 |
  # Import with an explicitly named source node.
  def testImportPlainFromGivenSrcNode(self):
    exp_info = """
[export]
version=0
os=mock_os
[instance]
name=old_name.example.com
"""

    self.rpc.call_export_info.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, exp_info)
    op = self.CopyOpCode(self.plain_op,
                         mode=constants.INSTANCE_IMPORT,
                         src_node=self.master.name)
    self.ExecOpCode(op)

  # Without a source node, a relative source path must actually resolve.
  def testImportPlainWithoutSrcNodeNotFound(self):
    op = self.CopyOpCode(self.plain_op,
                         mode=constants.INSTANCE_IMPORT)
    self.ExecOpCodeExpectOpPrereqError(
      op, "No export found for relative path")

  # Without a source node, the export is located via the export list RPC.
  def testImportPlainWithoutSrcNode(self):
    exp_info = """
[export]
version=0
os=mock_os
[instance]
name=old_name.example.com
"""

    self.rpc.call_export_list.return_value = \
      self.RpcResultsBuilder() \
        .AddSuccessfulNode(self.master, {"mock_path": {}}) \
        .Build()
    self.rpc.call_export_info.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, exp_info)

    op = self.CopyOpCode(self.plain_op,
                         mode=constants.INSTANCE_IMPORT,
                         src_path="mock_path")
    self.ExecOpCode(op)

  # An unparsable (empty) export config is a programmer error.
  def testImportPlainCorruptExportInfo(self):
    exp_info = ""
    self.rpc.call_export_info.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, exp_info)
    op = self.CopyOpCode(self.plain_op,
                         mode=constants.INSTANCE_IMPORT,
                         src_node=self.master.name)
    self.ExecOpCodeExpectException(op, errors.ProgrammerError,
                                   "Corrupted export config")

  # Only export format version 0 is supported.
  def testImportPlainWrongExportInfoVersion(self):
    exp_info = """
[export]
version=1
"""
    self.rpc.call_export_info.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, exp_info)
    op = self.CopyOpCode(self.plain_op,
                         mode=constants.INSTANCE_IMPORT,
                         src_node=self.master.name)
    self.ExecOpCodeExpectOpPrereqError(op, "Wrong export version")

  # A full import: disks/NICs/tags/hv+be+os params all come from the
  # export info, and the import daemon RPCs are stubbed to succeed.
  def testImportPlainWithParametersAndImport(self):
    exp_info = """
[export]
version=0
os=mock_os
[instance]
name=old_name.example.com
disk0_size=1024
disk1_size=1500
disk1_dump=mock_path
nic0_mode=bridged
nic0_link=br_mock
nic0_mac=f6:ab:f4:45:d1:af
nic0_ip=192.0.2.1
tags=tag1 tag2
hypervisor=xen-hvm
[hypervisor]
boot_order=cd
[backend]
memory=1024
vcpus=8
[os]
param1=val1
"""

    self.rpc.call_export_info.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, exp_info)
    self.rpc.call_import_start.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, "daemon_name")
    self.rpc.call_impexp_status.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master,
                                    [
                                      objects.ImportExportStatus(exit_status=0)
                                    ])
    self.rpc.call_impexp_cleanup.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, True)

    op = self.CopyOpCode(self.plain_op,
                         disks=[],
                         nics=[],
                         tags=[],
                         hypervisor=None,
                         hvparams={},
                         mode=constants.INSTANCE_IMPORT,
                         src_node=self.master.name)
    self.ExecOpCode(op)
742 |
|
743 |
|
744 |
class TestCheckOSVariant(CmdlibTestCase):
  """Tests for instance_utils._CheckOSVariant."""

  def _MakeOs(self, variants):
    # Build a mocked OS object advertising the given variant list.
    return self.cfg.CreateOs(supported_variants=variants)

  def _AssertRejected(self, os_obj, name):
    # The variant check must raise OpPrereqError for this OS/name pair.
    self.assertRaises(errors.OpPrereqError, instance_utils._CheckOSVariant,
                      os_obj, name)

  def testNoVariantsSupported(self):
    # Requesting a variant from an OS without variants fails.
    self._AssertRejected(self._MakeOs([]), "os+variant")

  def testNoVariantGiven(self):
    # An OS with variants requires one to be named.
    self._AssertRejected(self._MakeOs(["default"]), "os")

  def testWrongVariantGiven(self):
    # The named variant must be among the supported ones.
    self._AssertRejected(self._MakeOs(["default"]), "os+wrong_variant")

  def testOkWithVariant(self):
    instance_utils._CheckOSVariant(self._MakeOs(["default"]), "os+default")

  def testOkWithoutVariant(self):
    instance_utils._CheckOSVariant(self._MakeOs([]), "os")
767 |
|
768 |
|
769 |
class TestCheckTargetNodeIPolicy(TestLUInstanceCreate):
  """Tests for instance.CheckTargetNodeIPolicy (violation reporting)."""

  def setUp(self):
    super(TestCheckTargetNodeIPolicy, self).setUp()

    self.op = self.diskless_op

    # An instance plus a target node in a freshly created node group.
    self.instance = self.cfg.AddNewInstance()
    self.target_group = self.cfg.AddNewNodeGroup()
    self.target_node = self.cfg.AddNewNode(group=self.target_group)

  @withLockedLU
  def testNoViolation(self, lu):
    # No violations reported: no exception, no log output.
    compute_recoder = mock.Mock(return_value=[])
    instance.CheckTargetNodeIPolicy(lu, NotImplemented, self.instance,
                                    self.target_node, NotImplemented,
                                    _compute_fn=compute_recoder)
    self.assertTrue(compute_recoder.called)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testNoIgnore(self, lu):
    # A violation without ignore=True must raise OpPrereqError.
    compute_recoder = mock.Mock(return_value=["mem_size not in range"])
    self.assertRaises(errors.OpPrereqError, instance.CheckTargetNodeIPolicy,
                      lu, NotImplemented, self.instance,
                      self.target_node, NotImplemented,
                      _compute_fn=compute_recoder)
    self.assertTrue(compute_recoder.called)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testIgnoreViolation(self, lu):
    # With ignore=True the violation is only logged, not raised.
    compute_recoder = mock.Mock(return_value=["mem_size not in range"])
    instance.CheckTargetNodeIPolicy(lu, NotImplemented, self.instance,
                                    self.target_node, NotImplemented,
                                    ignore=True, _compute_fn=compute_recoder)
    self.assertTrue(compute_recoder.called)
    msg = ("Instance does not meet target node group's .* instance policy:"
           " mem_size not in range")
    self.mcpu.assertLogContainsRegex(msg)
808 |
|
809 |
|
810 |
class TestApplyContainerMods(unittest.TestCase): |
811 |
def testEmptyContainer(self): |
812 |
container = [] |
813 |
chgdesc = [] |
814 |
instance._ApplyContainerMods("test", container, chgdesc, [], None, None, |
815 |
None)
|
816 |
self.assertEqual(container, [])
|
817 |
self.assertEqual(chgdesc, [])
|
818 |
|
819 |
def testAdd(self): |
820 |
container = [] |
821 |
chgdesc = [] |
822 |
mods = instance._PrepareContainerMods([ |
823 |
(constants.DDM_ADD, -1, "Hello"), |
824 |
(constants.DDM_ADD, -1, "World"), |
825 |
(constants.DDM_ADD, 0, "Start"), |
826 |
(constants.DDM_ADD, -1, "End"), |
827 |
], None)
|
828 |
instance._ApplyContainerMods("test", container, chgdesc, mods,
|
829 |
None, None, None) |
830 |
self.assertEqual(container, ["Start", "Hello", "World", "End"]) |
831 |
self.assertEqual(chgdesc, [])
|
832 |
|
833 |
mods = instance._PrepareContainerMods([ |
834 |
(constants.DDM_ADD, 0, "zero"), |
835 |
(constants.DDM_ADD, 3, "Added"), |
836 |
(constants.DDM_ADD, 5, "four"), |
837 |
(constants.DDM_ADD, 7, "xyz"), |
838 |
], None)
|
839 |
instance._ApplyContainerMods("test", container, chgdesc, mods,
|
840 |
None, None, None) |
841 |
self.assertEqual(container,
|
842 |
["zero", "Start", "Hello", "Added", "World", "four", |
843 |
"End", "xyz"]) |
844 |
self.assertEqual(chgdesc, [])
|
845 |
|
846 |
for idx in [-2, len(container) + 1]: |
847 |
mods = instance._PrepareContainerMods([ |
848 |
(constants.DDM_ADD, idx, "error"),
|
849 |
], None)
|
850 |
self.assertRaises(IndexError, instance._ApplyContainerMods, |
851 |
"test", container, None, mods, None, None, None) |
852 |
|
853 |
def testRemoveError(self): |
854 |
for idx in [0, 1, 2, 100, -1, -4]: |
855 |
mods = instance._PrepareContainerMods([ |
856 |
(constants.DDM_REMOVE, idx, None),
|
857 |
], None)
|
858 |
self.assertRaises(IndexError, instance._ApplyContainerMods, |
859 |
"test", [], None, mods, None, None, None) |
860 |
|
861 |
mods = instance._PrepareContainerMods([ |
862 |
(constants.DDM_REMOVE, 0, object()), |
863 |
], None)
|
864 |
self.assertRaises(AssertionError, instance._ApplyContainerMods, |
865 |
"test", [""], None, mods, None, None, None) |
866 |
|
867 |
def testAddError(self): |
868 |
for idx in range(-100, -1) + [100]: |
869 |
mods = instance._PrepareContainerMods([ |
870 |
(constants.DDM_ADD, idx, None),
|
871 |
], None)
|
872 |
self.assertRaises(IndexError, instance._ApplyContainerMods, |
873 |
"test", [], None, mods, None, None, None) |
874 |
|
875 |
def testRemove(self): |
876 |
container = ["item 1", "item 2"] |
877 |
mods = instance._PrepareContainerMods([ |
878 |
(constants.DDM_ADD, -1, "aaa"), |
879 |
(constants.DDM_REMOVE, -1, None), |
880 |
(constants.DDM_ADD, -1, "bbb"), |
881 |
], None)
|
882 |
chgdesc = [] |
883 |
instance._ApplyContainerMods("test", container, chgdesc, mods,
|
884 |
None, None, None) |
885 |
self.assertEqual(container, ["item 1", "item 2", "bbb"]) |
886 |
self.assertEqual(chgdesc, [
|
887 |
("test/2", "remove"), |
888 |
]) |
889 |
|
890 |
def testModify(self): |
891 |
container = ["item 1", "item 2"] |
892 |
mods = instance._PrepareContainerMods([ |
893 |
(constants.DDM_MODIFY, -1, "a"), |
894 |
(constants.DDM_MODIFY, 0, "b"), |
895 |
(constants.DDM_MODIFY, 1, "c"), |
896 |
], None)
|
897 |
chgdesc = [] |
898 |
instance._ApplyContainerMods("test", container, chgdesc, mods,
|
899 |
None, None, None) |
900 |
self.assertEqual(container, ["item 1", "item 2"]) |
901 |
self.assertEqual(chgdesc, [])
|
902 |
|
903 |
for idx in [-2, len(container) + 1]: |
904 |
mods = instance._PrepareContainerMods([ |
905 |
(constants.DDM_MODIFY, idx, "error"),
|
906 |
], None)
|
907 |
self.assertRaises(IndexError, instance._ApplyContainerMods, |
908 |
"test", container, None, mods, None, None, None) |
909 |
|
910 |
@staticmethod
|
911 |
def _CreateTestFn(idx, params, private): |
912 |
private.data = ("add", idx, params)
|
913 |
return ((100 * idx, params), [ |
914 |
("test/%s" % idx, hex(idx)), |
915 |
]) |
916 |
|
917 |
@staticmethod
|
918 |
def _ModifyTestFn(idx, item, params, private): |
919 |
private.data = ("modify", idx, params)
|
920 |
return [
|
921 |
("test/%s" % idx, "modify %s" % params), |
922 |
] |
923 |
|
924 |
  @staticmethod
  def _RemoveTestFn(idx, item, private):
    """Fake "remove" callback: records the removed item for later checks."""
    private.data = ("remove", idx, item)
|
927 |
|
928 |
  def testAddWithCreateFunction(self):
    """Exercises _ApplyContainerMods with create/modify/remove callbacks.

    Each mod carries a mock.Mock "private" object; the callbacks
    (_CreateTestFn and friends) record what they were called with in it,
    so both the resulting container and the exact sequence of callback
    invocations can be verified afterwards.
    """
    container = []
    chgdesc = []
    mods = instance._PrepareContainerMods([
      (constants.DDM_ADD, -1, "Hello"),
      (constants.DDM_ADD, -1, "World"),
      (constants.DDM_ADD, 0, "Start"),
      (constants.DDM_ADD, -1, "End"),
      (constants.DDM_REMOVE, 2, None),
      (constants.DDM_MODIFY, -1, "foobar"),
      (constants.DDM_REMOVE, 2, None),
      (constants.DDM_ADD, 1, "More"),
      ], mock.Mock)
    instance._ApplyContainerMods("test", container, chgdesc, mods,
                                 self._CreateTestFn, self._ModifyTestFn,
                                 self._RemoveTestFn)
    # _CreateTestFn builds (100 * idx, params) items; the two removals at
    # index 2 dropped "World" and "End" again
    self.assertEqual(container, [
      (000, "Start"),
      (100, "More"),
      (000, "Hello"),
      ])
    self.assertEqual(chgdesc, [
      ("test/0", "0x0"),
      ("test/1", "0x1"),
      ("test/0", "0x0"),
      ("test/3", "0x3"),
      ("test/2", "remove"),
      ("test/2", "modify foobar"),
      ("test/2", "remove"),
      ("test/1", "0x1")
      ])
    # Every private object must have been written by the matching callback
    self.assertTrue(compat.all(op == private.data[0]
                               for (op, _, _, private) in mods))
    self.assertEqual([private.data for (op, _, _, private) in mods], [
      ("add", 0, "Hello"),
      ("add", 1, "World"),
      ("add", 0, "Start"),
      ("add", 3, "End"),
      ("remove", 2, (100, "World")),
      ("modify", 2, "foobar"),
      ("remove", 2, (300, "End")),
      ("add", 1, "More"),
      ])
971 |
|
972 |
|
973 |
class _FakeConfigForGenDiskTemplate(ConfigMock):
  """Config stub handing out deterministic IDs, minors, ports and secrets.

  The counters start at fixed values so tests can assert the exact
  identifiers GenerateDiskTemplate produces (e.g. "ec1-uq0", minor 20,
  constants.FIRST_DRBD_PORT, "ec1-secret0").
  """

  def __init__(self):
    super(_FakeConfigForGenDiskTemplate, self).__init__()

    self._unique_id = itertools.count()
    self._drbd_minor = itertools.count(20)
    self._port = itertools.count(constants.FIRST_DRBD_PORT)
    self._secret = itertools.count()

  def GenerateUniqueID(self, ec_id):
    """Returns a predictable unique ID for the given execution context."""
    # next(it) instead of it.next(): the builtin works on Python 2.6+ and 3.x
    return "ec%s-uq%s" % (ec_id, next(self._unique_id))

  def AllocateDRBDMinor(self, nodes, instance):
    """Returns one fresh DRBD minor (counting up from 20) per node."""
    return [next(self._drbd_minor)
            for _ in nodes]

  def AllocatePort(self):
    """Returns the next free DRBD port."""
    return next(self._port)

  def GenerateDRBDSecret(self, ec_id):
    """Returns a predictable DRBD secret for the given execution context."""
    return "ec%s-secret%s" % (ec_id, next(self._secret))
994 |
|
995 |
|
996 |
class TestGenerateDiskTemplate(CmdlibTestCase):
  """Tests for instance.GenerateDiskTemplate, one method per template."""

  def setUp(self):
    super(TestGenerateDiskTemplate, self).setUp()

    self.cfg = _FakeConfigForGenDiskTemplate()
    self.cluster.enabled_disk_templates = list(constants.DISK_TEMPLATES)

    self.nodegroup = self.cfg.AddNewNodeGroup(name="ng")

    self.lu = self.GetMockLU()

  @staticmethod
  def GetDiskParams():
    """Returns a private copy of the default disk template parameters."""
    return copy.deepcopy(constants.DISK_DT_DEFAULTS)

  def testWrongDiskTemplate(self):
    """An unknown disk template must be rejected with OpPrereqError."""
    gdt = instance.GenerateDiskTemplate
    disk_template = "##unknown##"

    assert disk_template not in constants.DISK_TEMPLATES

    self.assertRaises(errors.OpPrereqError, gdt, self.lu, disk_template,
                      "inst26831.example.com", "node30113.example.com", [], [],
                      NotImplemented, NotImplemented, 0, self.lu.LogInfo,
                      self.GetDiskParams())

  def testDiskless(self):
    """DT_DISKLESS yields an empty disk list."""
    gdt = instance.GenerateDiskTemplate

    result = gdt(self.lu, constants.DT_DISKLESS, "inst27734.example.com",
                 "node30113.example.com", [], [],
                 NotImplemented, NotImplemented, 0, self.lu.LogInfo,
                 self.GetDiskParams())
    self.assertEqual(result, [])

  def _TestTrivialDisk(self, template, disk_info, base_index, exp_dev_type,
                       file_storage_dir=NotImplemented,
                       file_driver=NotImplemented):
    """Generates disks for a template without children and verifies them.

    Checks size/mode/dev_type per disk, iv_name numbering starting at
    @base_index, and that a non-empty secondary node list is rejected.
    Returns the generated disk list for template-specific checks.
    """
    gdt = instance.GenerateDiskTemplate

    # Normalize the input dicts in place (explicit loop instead of a
    # side-effect-only map(), which would be lazy under Python 3)
    for params in disk_info:
      utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)

    # Check if non-empty list of secondaries is rejected
    self.assertRaises(errors.ProgrammerError, gdt, self.lu,
                      template, "inst25088.example.com",
                      "node185.example.com", ["node323.example.com"], [],
                      NotImplemented, NotImplemented, base_index,
                      self.lu.LogInfo, self.GetDiskParams())

    result = gdt(self.lu, template, "inst21662.example.com",
                 "node21741.example.com", [],
                 disk_info, file_storage_dir, file_driver, base_index,
                 self.lu.LogInfo, self.GetDiskParams())

    for (idx, disk) in enumerate(result):
      self.assertTrue(isinstance(disk, objects.Disk))
      self.assertEqual(disk.dev_type, exp_dev_type)
      self.assertEqual(disk.size, disk_info[idx][constants.IDISK_SIZE])
      self.assertEqual(disk.mode, disk_info[idx][constants.IDISK_MODE])
      self.assertTrue(disk.children is None)

    # iv_names must already be correct and be stable under _UpdateIvNames
    self._CheckIvNames(result, base_index, base_index + len(disk_info))
    instance._UpdateIvNames(base_index, result)
    self._CheckIvNames(result, base_index, base_index + len(disk_info))

    return result

  def _CheckIvNames(self, disks, base_index, end_index):
    """Verifies iv_name is "disk/<N>" for N in [base_index, end_index)."""
    self.assertEqual([d.iv_name for d in disks],
                     ["disk/%s" % i for i in range(base_index, end_index)])

  def testPlain(self):
    """DT_PLAIN: logical IDs use the default VG unless IDISK_VG is given."""
    disk_info = [{
      constants.IDISK_SIZE: 1024,
      constants.IDISK_MODE: constants.DISK_RDWR,
      }, {
      constants.IDISK_SIZE: 4096,
      constants.IDISK_VG: "othervg",
      constants.IDISK_MODE: constants.DISK_RDWR,
      }]

    result = self._TestTrivialDisk(constants.DT_PLAIN, disk_info, 3,
                                   constants.DT_PLAIN)

    self.assertEqual([d.logical_id for d in result], [
      ("xenvg", "ec1-uq0.disk3"),
      ("othervg", "ec1-uq1.disk4"),
      ])

  def testFile(self):
    """File-based templates: rejected when disabled, correct IDs when on."""
    # anything != DT_FILE would do here
    self.cluster.enabled_disk_templates = [constants.DT_PLAIN]
    self.assertRaises(errors.OpPrereqError, self._TestTrivialDisk,
                      constants.DT_FILE, [], 0, NotImplemented)
    self.assertRaises(errors.OpPrereqError, self._TestTrivialDisk,
                      constants.DT_SHARED_FILE, [], 0, NotImplemented)

    for disk_template in constants.DTS_FILEBASED:
      disk_info = [{
        constants.IDISK_SIZE: 80 * 1024,
        constants.IDISK_MODE: constants.DISK_RDONLY,
        }, {
        constants.IDISK_SIZE: 4096,
        constants.IDISK_MODE: constants.DISK_RDWR,
        }, {
        constants.IDISK_SIZE: 6 * 1024,
        constants.IDISK_MODE: constants.DISK_RDWR,
        }]

      self.cluster.enabled_disk_templates = [disk_template]
      result = self._TestTrivialDisk(
        disk_template, disk_info, 2, disk_template,
        file_storage_dir="/tmp", file_driver=constants.FD_BLKTAP)

      if disk_template == constants.DT_GLUSTER:
        # Here "inst21662.example.com" is actually the instance UUID, not its
        # name, so while this result looks wrong, it is actually correct.
        expected = [(constants.FD_BLKTAP,
                     'ganeti/inst21662.example.com.%d' % x)
                    for x in (2,3,4)]
        self.assertEqual([d.logical_id for d in result],
                         expected)
      else:
        for (idx, disk) in enumerate(result):
          (file_driver, file_storage_dir) = disk.logical_id
          dir_fmt = r"^/tmp/.*\.%s\.disk%d$" % (disk_template, idx + 2)
          self.assertEqual(file_driver, constants.FD_BLKTAP)
          # FIXME: use assertIsNotNone when py 2.7 is minimum supported version
          self.assertNotEqual(re.match(dir_fmt, file_storage_dir), None)

  def testBlock(self):
    """DT_BLOCK: the adopted device path becomes the logical ID."""
    disk_info = [{
      constants.IDISK_SIZE: 8 * 1024,
      constants.IDISK_MODE: constants.DISK_RDWR,
      constants.IDISK_ADOPT: "/tmp/some/block/dev",
      }]

    result = self._TestTrivialDisk(constants.DT_BLOCK, disk_info, 10,
                                   constants.DT_BLOCK)

    self.assertEqual([d.logical_id for d in result], [
      (constants.BLOCKDEV_DRIVER_MANUAL, "/tmp/some/block/dev"),
      ])

  def testRbd(self):
    """DT_RBD: logical IDs are ("rbd", <unique name>)."""
    disk_info = [{
      constants.IDISK_SIZE: 8 * 1024,
      constants.IDISK_MODE: constants.DISK_RDONLY,
      }, {
      constants.IDISK_SIZE: 100 * 1024,
      constants.IDISK_MODE: constants.DISK_RDWR,
      }]

    result = self._TestTrivialDisk(constants.DT_RBD, disk_info, 0,
                                   constants.DT_RBD)

    self.assertEqual([d.logical_id for d in result], [
      ("rbd", "ec1-uq0.rbd.disk0"),
      ("rbd", "ec1-uq1.rbd.disk1"),
      ])

  def testDrbd8(self):
    """DT_DRBD8: checks children (data/meta LVs), minors, ports, secrets."""
    gdt = instance.GenerateDiskTemplate
    drbd8_defaults = constants.DISK_LD_DEFAULTS[constants.DT_DRBD8]
    drbd8_default_metavg = drbd8_defaults[constants.LDP_DEFAULT_METAVG]

    disk_info = [{
      constants.IDISK_SIZE: 1024,
      constants.IDISK_MODE: constants.DISK_RDWR,
      }, {
      constants.IDISK_SIZE: 100 * 1024,
      constants.IDISK_MODE: constants.DISK_RDONLY,
      constants.IDISK_METAVG: "metavg",
      }, {
      constants.IDISK_SIZE: 4096,
      constants.IDISK_MODE: constants.DISK_RDWR,
      constants.IDISK_VG: "vgxyz",
      },
      ]

    exp_logical_ids = [
      [
        (self.lu.cfg.GetVGName(), "ec1-uq0.disk0_data"),
        (drbd8_default_metavg, "ec1-uq0.disk0_meta"),
      ], [
        (self.lu.cfg.GetVGName(), "ec1-uq1.disk1_data"),
        ("metavg", "ec1-uq1.disk1_meta"),
      ], [
        ("vgxyz", "ec1-uq2.disk2_data"),
        (drbd8_default_metavg, "ec1-uq2.disk2_meta"),
      ]]

    assert len(exp_logical_ids) == len(disk_info)

    for params in disk_info:
      utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)

    # Check if empty list of secondaries is rejected
    self.assertRaises(errors.ProgrammerError, gdt, self.lu, constants.DT_DRBD8,
                      "inst827.example.com", "node1334.example.com", [],
                      disk_info, NotImplemented, NotImplemented, 0,
                      self.lu.LogInfo, self.GetDiskParams())

    result = gdt(self.lu, constants.DT_DRBD8, "inst827.example.com",
                 "node1334.example.com", ["node12272.example.com"],
                 disk_info, NotImplemented, NotImplemented, 0, self.lu.LogInfo,
                 self.GetDiskParams())

    for (idx, disk) in enumerate(result):
      self.assertTrue(isinstance(disk, objects.Disk))
      self.assertEqual(disk.dev_type, constants.DT_DRBD8)
      self.assertEqual(disk.size, disk_info[idx][constants.IDISK_SIZE])
      self.assertEqual(disk.mode, disk_info[idx][constants.IDISK_MODE])

      for child in disk.children:
        # Bug fix: the original checked isinstance(disk, ...) here, i.e.
        # re-tested the parent instead of the child
        self.assertTrue(isinstance(child, objects.Disk))
        self.assertEqual(child.dev_type, constants.DT_PLAIN)
        self.assertTrue(child.children is None)

      self.assertEqual([child.logical_id for child in disk.children],
                       exp_logical_ids[idx])

      self.assertEqual(len(disk.children), 2)
      self.assertEqual(disk.children[0].size, disk.size)
      self.assertEqual(disk.children[1].size, constants.DRBD_META_SIZE)

    self._CheckIvNames(result, 0, len(disk_info))
    instance._UpdateIvNames(0, result)
    self._CheckIvNames(result, 0, len(disk_info))

    # (primary, secondary, port, minor A, minor B, secret), counting up from
    # the fake config's deterministic start values
    self.assertEqual([d.logical_id for d in result], [
      ("node1334.example.com", "node12272.example.com",
       constants.FIRST_DRBD_PORT, 20, 21, "ec1-secret0"),
      ("node1334.example.com", "node12272.example.com",
       constants.FIRST_DRBD_PORT + 1, 22, 23, "ec1-secret1"),
      ("node1334.example.com", "node12272.example.com",
       constants.FIRST_DRBD_PORT + 2, 24, 25, "ec1-secret2"),
      ])
|
1238 |
|
1239 |
class _DiskPauseTracker:
  """Pause/resume callback stub recording (logical_id, size, pause) calls."""

  def __init__(self):
    self.history = []

  def __call__(self, args, pause):
    (disks, inst) = args
    # All paused disks must belong to the instance
    assert not (set(disks) - set(inst.disks))

    for disk in disks:
      self.history.append((disk.logical_id, disk.size, pause))

    return (True, [True] * len(disks))
1250 |
|
1251 |
|
1252 |
class _ConfigForDiskWipe: |
1253 |
def __init__(self, exp_node_uuid): |
1254 |
self._exp_node_uuid = exp_node_uuid
|
1255 |
|
1256 |
def GetNodeName(self, node_uuid): |
1257 |
assert node_uuid == self._exp_node_uuid |
1258 |
return "name.of.expected.node" |
1259 |
|
1260 |
|
1261 |
class _RpcForDiskWipe:
  """RPC runner stub routing pause/resume and wipe calls to callbacks."""

  def __init__(self, exp_node, pause_cb, wipe_cb):
    self._expected_node = exp_node
    self._on_pause = pause_cb
    self._on_wipe = wipe_cb

  def call_blockdev_pause_resume_sync(self, node, disks, pause):
    """Delegates to the pause callback, wrapped in an RpcResult."""
    assert node == self._expected_node
    return rpc.RpcResult(data=self._on_pause(disks, pause))

  def call_blockdev_wipe(self, node, bdev, offset, size):
    """Delegates to the wipe callback, wrapped in an RpcResult."""
    assert node == self._expected_node
    return rpc.RpcResult(data=self._on_wipe(bdev, offset, size))
1274 |
|
1275 |
|
1276 |
class _DiskWipeProgressTracker:
  """Wipe callback stub validating chunking and recording per-disk progress.

  Used as the wipe callback of _RpcForDiskWipe: each call must continue
  exactly where the previous chunk for that disk ended.
  """

  def __init__(self, start_offset):
    # Offset at which wiping is expected to start on every disk
    self._start_offset = start_offset
    # Maps disk.logical_id to the next expected offset
    self.progress = {}

  def __call__(self, (disk, _), offset, size):
    assert isinstance(offset, (long, int))
    assert isinstance(size, (long, int))

    # WipeDisks must never request chunks larger than this fraction of the
    # disk size
    max_chunk_size = (disk.size / 100.0 * constants.MIN_WIPE_CHUNK_PERCENT)

    assert offset >= self._start_offset
    assert (offset + size) <= disk.size

    assert size > 0
    assert size <= constants.MAX_WIPE_CHUNK
    assert size <= max_chunk_size

    # Any chunk but the very first must continue an already-seen disk
    assert offset == self._start_offset or disk.logical_id in self.progress

    # Keep track of progress
    cur_progress = self.progress.setdefault(disk.logical_id, self._start_offset)

    assert cur_progress == offset

    # Record progress
    self.progress[disk.logical_id] += size

    return (True, None)
1305 |
|
1306 |
|
1307 |
class TestWipeDisks(unittest.TestCase): |
1308 |
def _FailingPauseCb(self, (disks, _), pause): |
1309 |
self.assertEqual(len(disks), 3) |
1310 |
self.assertTrue(pause)
|
1311 |
# Simulate an RPC error
|
1312 |
return (False, "error") |
1313 |
|
1314 |
def testPauseFailure(self): |
1315 |
node_name = "node1372.example.com"
|
1316 |
|
1317 |
lu = _FakeLU(rpc=_RpcForDiskWipe(node_name, self._FailingPauseCb,
|
1318 |
NotImplemented),
|
1319 |
cfg=_ConfigForDiskWipe(node_name)) |
1320 |
|
1321 |
disks = [ |
1322 |
objects.Disk(dev_type=constants.DT_PLAIN), |
1323 |
objects.Disk(dev_type=constants.DT_PLAIN), |
1324 |
objects.Disk(dev_type=constants.DT_PLAIN), |
1325 |
] |
1326 |
|
1327 |
inst = objects.Instance(name="inst21201",
|
1328 |
primary_node=node_name, |
1329 |
disk_template=constants.DT_PLAIN, |
1330 |
disks=disks) |
1331 |
|
1332 |
self.assertRaises(errors.OpExecError, instance.WipeDisks, lu, inst)
|
1333 |
|
1334 |
def _FailingWipeCb(self, (disk, _), offset, size): |
1335 |
# This should only ever be called for the first disk
|
1336 |
self.assertEqual(disk.logical_id, "disk0") |
1337 |
return (False, None) |
1338 |
|
1339 |
def testFailingWipe(self): |
1340 |
node_uuid = "node13445-uuid"
|
1341 |
pt = _DiskPauseTracker() |
1342 |
|
1343 |
lu = _FakeLU(rpc=_RpcForDiskWipe(node_uuid, pt, self._FailingWipeCb),
|
1344 |
cfg=_ConfigForDiskWipe(node_uuid)) |
1345 |
|
1346 |
disks = [ |
1347 |
objects.Disk(dev_type=constants.DT_PLAIN, logical_id="disk0",
|
1348 |
size=100 * 1024), |
1349 |
objects.Disk(dev_type=constants.DT_PLAIN, logical_id="disk1",
|
1350 |
size=500 * 1024), |
1351 |
objects.Disk(dev_type=constants.DT_PLAIN, logical_id="disk2", size=256), |
1352 |
] |
1353 |
|
1354 |
inst = objects.Instance(name="inst562",
|
1355 |
primary_node=node_uuid, |
1356 |
disk_template=constants.DT_PLAIN, |
1357 |
disks=disks) |
1358 |
|
1359 |
try:
|
1360 |
instance.WipeDisks(lu, inst) |
1361 |
except errors.OpExecError, err:
|
1362 |
self.assertTrue(str(err), "Could not wipe disk 0 at offset 0 ") |
1363 |
else:
|
1364 |
self.fail("Did not raise exception") |
1365 |
|
1366 |
# Check if all disks were paused and resumed
|
1367 |
self.assertEqual(pt.history, [
|
1368 |
("disk0", 100 * 1024, True), |
1369 |
("disk1", 500 * 1024, True), |
1370 |
("disk2", 256, True), |
1371 |
("disk0", 100 * 1024, False), |
1372 |
("disk1", 500 * 1024, False), |
1373 |
("disk2", 256, False), |
1374 |
]) |
1375 |
|
1376 |
def _PrepareWipeTest(self, start_offset, disks): |
1377 |
node_name = "node-with-offset%s.example.com" % start_offset
|
1378 |
pauset = _DiskPauseTracker() |
1379 |
progresst = _DiskWipeProgressTracker(start_offset) |
1380 |
|
1381 |
lu = _FakeLU(rpc=_RpcForDiskWipe(node_name, pauset, progresst), |
1382 |
cfg=_ConfigForDiskWipe(node_name)) |
1383 |
|
1384 |
instance = objects.Instance(name="inst3560",
|
1385 |
primary_node=node_name, |
1386 |
disk_template=constants.DT_PLAIN, |
1387 |
disks=disks) |
1388 |
|
1389 |
return (lu, instance, pauset, progresst)
|
1390 |
|
1391 |
def testNormalWipe(self): |
1392 |
disks = [ |
1393 |
objects.Disk(dev_type=constants.DT_PLAIN, logical_id="disk0", size=1024), |
1394 |
objects.Disk(dev_type=constants.DT_PLAIN, logical_id="disk1",
|
1395 |
size=500 * 1024), |
1396 |
objects.Disk(dev_type=constants.DT_PLAIN, logical_id="disk2", size=128), |
1397 |
objects.Disk(dev_type=constants.DT_PLAIN, logical_id="disk3",
|
1398 |
size=constants.MAX_WIPE_CHUNK), |
1399 |
] |
1400 |
|
1401 |
(lu, inst, pauset, progresst) = self._PrepareWipeTest(0, disks) |
1402 |
|
1403 |
instance.WipeDisks(lu, inst) |
1404 |
|
1405 |
self.assertEqual(pauset.history, [
|
1406 |
("disk0", 1024, True), |
1407 |
("disk1", 500 * 1024, True), |
1408 |
("disk2", 128, True), |
1409 |
("disk3", constants.MAX_WIPE_CHUNK, True), |
1410 |
("disk0", 1024, False), |
1411 |
("disk1", 500 * 1024, False), |
1412 |
("disk2", 128, False), |
1413 |
("disk3", constants.MAX_WIPE_CHUNK, False), |
1414 |
]) |
1415 |
|
1416 |
# Ensure the complete disk has been wiped
|
1417 |
self.assertEqual(progresst.progress,
|
1418 |
dict((i.logical_id, i.size) for i in disks)) |
1419 |
|
1420 |
def testWipeWithStartOffset(self): |
1421 |
for start_offset in [0, 280, 8895, 1563204]: |
1422 |
disks = [ |
1423 |
objects.Disk(dev_type=constants.DT_PLAIN, logical_id="disk0",
|
1424 |
size=128),
|
1425 |
objects.Disk(dev_type=constants.DT_PLAIN, logical_id="disk1",
|
1426 |
size=start_offset + (100 * 1024)), |
1427 |
] |
1428 |
|
1429 |
(lu, inst, pauset, progresst) = \ |
1430 |
self._PrepareWipeTest(start_offset, disks)
|
1431 |
|
1432 |
# Test start offset with only one disk
|
1433 |
instance.WipeDisks(lu, inst, |
1434 |
disks=[(1, disks[1], start_offset)]) |
1435 |
|
1436 |
# Only the second disk may have been paused and wiped
|
1437 |
self.assertEqual(pauset.history, [
|
1438 |
("disk1", start_offset + (100 * 1024), True), |
1439 |
("disk1", start_offset + (100 * 1024), False), |
1440 |
]) |
1441 |
self.assertEqual(progresst.progress, {
|
1442 |
"disk1": disks[1].size, |
1443 |
}) |
1444 |
|
1445 |
|
1446 |
class TestCheckOpportunisticLocking(unittest.TestCase):
  """Tests for instance._CheckOpportunisticLocking."""

  class OpTest(opcodes.OpCode):
    OP_PARAMS = [
      ("opportunistic_locking", False, ht.TBool, None),
      ("iallocator", None, ht.TMaybe(ht.TNonEmptyString), "")
      ]

  @classmethod
  def _MakeOp(cls, **kwargs):
    """Builds and validates a test opcode."""
    result = cls.OpTest(**kwargs)
    result.Validate(True)
    return result

  def testMissingAttributes(self):
    """Objects lacking the expected attributes must be rejected."""
    self.assertRaises(AttributeError, instance._CheckOpportunisticLocking,
                      object())

  def testDefaults(self):
    """The default parameter combination is accepted."""
    instance._CheckOpportunisticLocking(self._MakeOp())

  def test(self):
    """Opportunistic locking is only valid together with an iallocator."""
    combinations = itertools.product([None, "something", "other"],
                                     [False, True])
    for (iallocator, opplock) in combinations:
      op = self._MakeOp(iallocator=iallocator,
                        opportunistic_locking=opplock)
      if opplock and not iallocator:
        self.assertRaises(errors.OpPrereqError,
                          instance._CheckOpportunisticLocking, op)
      else:
        instance._CheckOpportunisticLocking(op)
1477 |
|
1478 |
|
1479 |
class TestLUInstanceRemove(CmdlibTestCase):
  """Tests for LUInstanceRemove."""

  def testRemoveMissingInstance(self):
    """Removing an unknown instance must fail during CheckPrereq."""
    remove_op = opcodes.OpInstanceRemove(instance_name="missing.inst")
    self.ExecOpCodeExpectOpPrereqError(
      remove_op, "Instance 'missing.inst' not known")

  def testRemoveInst(self):
    """Removing an existing (diskless) instance succeeds."""
    inst = self.cfg.AddNewInstance(disks=[])
    self.ExecOpCode(opcodes.OpInstanceRemove(instance_name=inst.name))
|
1488 |
|
1489 |
|
1490 |
class TestLUInstanceMove(CmdlibTestCase):
  """Tests for LUInstanceMove (copying an instance to another node)."""

  def setUp(self):
    super(TestLUInstanceMove, self).setUp()

    # Default move target
    self.node = self.cfg.AddNewNode()

    # Successful RPC answers for the disk copy machinery
    self.rpc.call_blockdev_assemble.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.node, ("/dev/mocked_path",
                                    "/var/run/ganeti/instance-disks/mocked_d"))
    self.rpc.call_blockdev_remove.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, "")

    # NOTE(review): "deamon" [sic] is a runtime string, kept unchanged
    def ImportStart(node_uuid, opt, inst, component, args):
      return self.RpcResultsBuilder() \
               .CreateSuccessfulNodeResult(node_uuid,
                                           "deamon_on_%s" % node_uuid)
    self.rpc.call_import_start.side_effect = ImportStart

    # By default the import/export daemon reports a clean exit
    def ImpExpStatus(node_uuid, name):
      return self.RpcResultsBuilder() \
               .CreateSuccessfulNodeResult(node_uuid,
                                           [objects.ImportExportStatus(
                                             exit_status=0
                                           )])
    self.rpc.call_impexp_status.side_effect = ImpExpStatus

    def ImpExpCleanup(node_uuid, name):
      return self.RpcResultsBuilder() \
               .CreateSuccessfulNodeResult(node_uuid)
    self.rpc.call_impexp_cleanup.side_effect = ImpExpCleanup

  def testMissingInstance(self):
    """Moving an unknown instance must fail during CheckPrereq."""
    op = opcodes.OpInstanceMove(instance_name="missing.inst",
                                target_node=self.node.name)
    self.ExecOpCodeExpectOpPrereqError(op, "Instance 'missing.inst' not known")

  def testUncopyableDiskTemplate(self):
    """Templates outside the copyable set are rejected."""
    inst = self.cfg.AddNewInstance(disk_template=constants.DT_SHARED_FILE)
    op = opcodes.OpInstanceMove(instance_name=inst.name,
                                target_node=self.node.name)
    self.ExecOpCodeExpectOpPrereqError(
      op, "Disk template sharedfile not suitable for copying")

  def testAlreadyOnTargetNode(self):
    """Moving onto the current primary node is refused."""
    inst = self.cfg.AddNewInstance()
    op = opcodes.OpInstanceMove(instance_name=inst.name,
                                target_node=self.master.name)
    self.ExecOpCodeExpectOpPrereqError(
      op, "Instance .* is already on the node .*")

  def testMoveStoppedInstance(self):
    """A stopped instance moves without any start RPC."""
    inst = self.cfg.AddNewInstance()
    op = opcodes.OpInstanceMove(instance_name=inst.name,
                                target_node=self.node.name)
    self.ExecOpCode(op)

  def testMoveRunningInstance(self):
    """A running instance is restarted on the target after the copy."""
    self.rpc.call_node_info.return_value = \
      self.RpcResultsBuilder() \
        .AddSuccessfulNode(self.node,
                           (NotImplemented, NotImplemented,
                            ({"memory_free": 10000}, ))) \
        .Build()
    self.rpc.call_instance_start.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.node, "")

    inst = self.cfg.AddNewInstance(admin_state=constants.ADMINST_UP)
    op = opcodes.OpInstanceMove(instance_name=inst.name,
                                target_node=self.node.name)
    self.ExecOpCode(op)

  def testMoveFailingStartInstance(self):
    """A failing start RPC on the target raises OpExecError."""
    self.rpc.call_node_info.return_value = \
      self.RpcResultsBuilder() \
        .AddSuccessfulNode(self.node,
                           (NotImplemented, NotImplemented,
                            ({"memory_free": 10000}, ))) \
        .Build()
    self.rpc.call_instance_start.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.node)

    inst = self.cfg.AddNewInstance(admin_state=constants.ADMINST_UP)
    op = opcodes.OpInstanceMove(instance_name=inst.name,
                                target_node=self.node.name)
    self.ExecOpCodeExpectOpExecError(
      op, "Could not start instance .* on node .*")

  def testMoveFailingImpExpDaemonExitCode(self):
    """A non-zero import daemon exit status fails the disk copy."""
    inst = self.cfg.AddNewInstance()
    self.rpc.call_impexp_status.side_effect = None
    self.rpc.call_impexp_status.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.node,
                                    [objects.ImportExportStatus(
                                      exit_status=1,
                                      recent_output=["mock output"]
                                    )])
    op = opcodes.OpInstanceMove(instance_name=inst.name,
                                target_node=self.node.name)
    self.ExecOpCodeExpectOpExecError(op, "Errors during disk copy")

  def testMoveFailingStartImpExpDaemon(self):
    """A failure to even start the import daemon fails the disk copy."""
    inst = self.cfg.AddNewInstance()
    self.rpc.call_import_start.side_effect = None
    self.rpc.call_import_start.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.node)
    op = opcodes.OpInstanceMove(instance_name=inst.name,
                                target_node=self.node.name)
    self.ExecOpCodeExpectOpExecError(op, "Errors during disk copy")
1604 |
|
1605 |
|
1606 |
class TestLUInstanceRename(CmdlibTestCase):
  """Tests for LUInstanceRename."""

  def setUp(self):
    super(TestLUInstanceRename, self).setUp()

    self.inst = self.cfg.AddNewInstance()

    # Base opcode; individual tests copy and tweak it
    self.op = opcodes.OpInstanceRename(instance_name=self.inst.name,
                                       new_name="new_name.example.com")

  def testIpCheckWithoutNameCheck(self):
    """ip_check without name_check is an invalid combination."""
    op = self.CopyOpCode(self.op,
                         ip_check=True,
                         name_check=False)
    self.ExecOpCodeExpectOpPrereqError(
      op, "IP address check requires a name check")

  def testIpAlreadyInUse(self):
    """A pingable target IP must abort the rename."""
    self.netutils_mod.TcpPing.return_value = True
    op = self.CopyOpCode(self.op)
    self.ExecOpCodeExpectOpPrereqError(
      op, "IP .* of instance .* already in use")

  def testExistingInstanceName(self):
    """Renaming to the name of an existing instance is refused."""
    self.cfg.AddNewInstance(name="new_name.example.com")
    op = self.CopyOpCode(self.op)
    self.ExecOpCodeExpectOpPrereqError(
      op, "Instance .* is already in the cluster")

  def testFileInstance(self):
    """File-based instances also get their storage renamed."""
    self.rpc.call_blockdev_assemble.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, (None, None))
    self.rpc.call_blockdev_shutdown.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, (None, None))

    inst = self.cfg.AddNewInstance(disk_template=constants.DT_FILE)
    op = self.CopyOpCode(self.op,
                         instance_name=inst.name)
    self.ExecOpCode(op)
|
1646 |
|
1647 |
|
1648 |
class TestLUInstanceMultiAlloc(CmdlibTestCase):
  """Tests for LUInstanceMultiAlloc."""

  def setUp(self):
    super(TestLUInstanceMultiAlloc, self).setUp()

    # Template create-opcode; tests copy it per instance
    self.inst_op = opcodes.OpInstanceCreate(instance_name="inst.example.com",
                                            disk_template=constants.DT_DRBD8,
                                            disks=[],
                                            nics=[],
                                            os_type="mock_os",
                                            hypervisor=constants.HT_XEN_HVM,
                                            mode=constants.INSTANCE_CREATE)

  def testInstanceWithIAllocator(self):
    """Per-instance iallocator settings are not allowed."""
    inst = self.CopyOpCode(self.inst_op,
                           iallocator="mock")
    op = opcodes.OpInstanceMultiAlloc(instances=[inst])
    self.ExecOpCodeExpectOpPrereqError(
      op, "iallocator are not allowed to be set on instance objects")

  def testOnlySomeNodesGiven(self):
    """Either all or none of the instances may name their nodes."""
    inst1 = self.CopyOpCode(self.inst_op,
                            pnode=self.master.name)
    inst2 = self.CopyOpCode(self.inst_op)
    op = opcodes.OpInstanceMultiAlloc(instances=[inst1, inst2])
    self.ExecOpCodeExpectOpPrereqError(
      op, "There are instance objects providing pnode/snode while others"
          " do not")

  def testMissingIAllocator(self):
    """Without nodes, an iallocator (or cluster default) is required."""
    self.cluster.default_iallocator = None
    inst = self.CopyOpCode(self.inst_op)
    op = opcodes.OpInstanceMultiAlloc(instances=[inst])
    self.ExecOpCodeExpectOpPrereqError(
      op, "No iallocator or nodes on the instances given and no cluster-wide"
          " default iallocator found")

  def testDuplicateInstanceNames(self):
    """Duplicate instance names in one request are rejected."""
    inst1 = self.CopyOpCode(self.inst_op)
    inst2 = self.CopyOpCode(self.inst_op)
    op = opcodes.OpInstanceMultiAlloc(instances=[inst1, inst2])
    self.ExecOpCodeExpectOpPrereqError(
      op, "There are duplicate instance names")

  def testWithGivenNodes(self):
    """Allocation succeeds with explicitly given pnode/snode."""
    snode = self.cfg.AddNewNode()
    inst = self.CopyOpCode(self.inst_op,
                           pnode=self.master.name,
                           snode=snode.name)
    op = opcodes.OpInstanceMultiAlloc(instances=[inst])
    self.ExecOpCode(op)

  def testDryRun(self):
    """Dry-run mode completes without creating anything."""
    snode = self.cfg.AddNewNode()
    inst = self.CopyOpCode(self.inst_op,
                           pnode=self.master.name,
                           snode=snode.name)
    op = opcodes.OpInstanceMultiAlloc(instances=[inst],
                                      dry_run=True)
    self.ExecOpCode(op)

  def testWithIAllocator(self):
    """Allocation succeeds with nodes provided by the iallocator."""
    snode = self.cfg.AddNewNode()
    self.iallocator_cls.return_value.result = \
      ([("inst.example.com", [self.master.name, snode.name])], [])

    inst = self.CopyOpCode(self.inst_op)
    op = opcodes.OpInstanceMultiAlloc(instances=[inst],
                                      iallocator="mock_ialloc")
    self.ExecOpCode(op)

  def testWithIAllocatorOpportunisticLocking(self):
    """Opportunistic locking together with an iallocator works."""
    snode = self.cfg.AddNewNode()
    self.iallocator_cls.return_value.result = \
      ([("inst.example.com", [self.master.name, snode.name])], [])

    inst = self.CopyOpCode(self.inst_op)
    op = opcodes.OpInstanceMultiAlloc(instances=[inst],
                                      iallocator="mock_ialloc",
                                      opportunistic_locking=True)
    self.ExecOpCode(op)

  def testFailingIAllocator(self):
    """An unsuccessful iallocator run aborts the request."""
    self.iallocator_cls.return_value.success = False

    inst = self.CopyOpCode(self.inst_op)
    op = opcodes.OpInstanceMultiAlloc(instances=[inst],
                                      iallocator="mock_ialloc")
    self.ExecOpCodeExpectOpPrereqError(
      op, "Can't compute nodes using iallocator")
|
1737 |
|
1738 |
|
1739 |
class TestLUInstanceSetParams(CmdlibTestCase):
  """Tests for LUInstanceSetParams ("gnt-instance modify").

  setUp prepares two instances — a stopped one (self.inst/self.op) and a
  running one (self.running_inst/self.running_op) — plus a candidate
  secondary node (self.snode) that deliberately has less free memory than
  the master, so failover-capacity checks can be exercised.
  """

  def setUp(self):
    super(TestLUInstanceSetParams, self).setUp()

    # Stopped instance and an opcode template targeting it.
    self.inst = self.cfg.AddNewInstance()
    self.op = opcodes.OpInstanceSetParams(instance_name=self.inst.name)

    # Running instance and a matching opcode template.
    self.running_inst = \
      self.cfg.AddNewInstance(admin_state=constants.ADMINST_UP)
    self.running_op = \
      opcodes.OpInstanceSetParams(instance_name=self.running_inst.name)

    self.snode = self.cfg.AddNewNode()

    # Mocked node resources; snode has only 512 MB free vs. 2048 MB on the
    # master so memory increases can exceed the secondary's capacity.
    self.mocked_storage_type = constants.ST_LVM_VG
    self.mocked_storage_free = 10000
    self.mocked_master_cpu_total = 16
    self.mocked_master_memory_free = 2048
    self.mocked_snode_cpu_total = 16
    self.mocked_snode_memory_free = 512

    # Runtime data reported for the running instance.
    self.mocked_running_inst_memory = 1024
    self.mocked_running_inst_vcpus = 8
    self.mocked_running_inst_state = "running"
    self.mocked_running_inst_time = 10938474

    bootid = "mock_bootid"
    storage_info = [
      {
        "type": self.mocked_storage_type,
        "storage_free": self.mocked_storage_free
      }
    ]
    hv_info_master = {
      "cpu_total": self.mocked_master_cpu_total,
      "memory_free": self.mocked_master_memory_free
    }
    hv_info_snode = {
      "cpu_total": self.mocked_snode_cpu_total,
      "memory_free": self.mocked_snode_memory_free
    }

    # node_info reports storage and hypervisor data for both nodes.
    self.rpc.call_node_info.return_value = \
      self.RpcResultsBuilder() \
        .AddSuccessfulNode(self.master,
                           (bootid, storage_info, (hv_info_master, ))) \
        .AddSuccessfulNode(self.snode,
                           (bootid, storage_info, (hv_info_snode, ))) \
        .Build()

    def _InstanceInfo(_, instance, __, ___):
      # instance_info returns None for the stopped instance and runtime
      # data for the running one; any other name is a test bug.
      if instance == self.inst.name:
        return self.RpcResultsBuilder() \
                 .CreateSuccessfulNodeResult(self.master, None)
      elif instance == self.running_inst.name:
        return self.RpcResultsBuilder() \
                 .CreateSuccessfulNodeResult(
                   self.master, {
                     "memory": self.mocked_running_inst_memory,
                     "vcpus": self.mocked_running_inst_vcpus,
                     "state": self.mocked_running_inst_state,
                     "time": self.mocked_running_inst_time
                   })
      else:
        raise AssertionError()
    self.rpc.call_instance_info.side_effect = _InstanceInfo

    self.rpc.call_bridges_exist.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, True)

    self.rpc.call_blockdev_getmirrorstatus.side_effect = \
      lambda node, _: self.RpcResultsBuilder() \
                        .CreateSuccessfulNodeResult(node, [])

    self.rpc.call_blockdev_shutdown.side_effect = \
      lambda node, _: self.RpcResultsBuilder() \
                        .CreateSuccessfulNodeResult(node, [])

  def testNoChanges(self):
    """An opcode without any modification must be rejected."""
    op = self.CopyOpCode(self.op)
    self.ExecOpCodeExpectOpPrereqError(op, "No changes submitted")

  def testGlobalHvparams(self):
    """Cluster-global hypervisor parameters cannot be set per instance."""
    op = self.CopyOpCode(self.op,
                         hvparams={constants.HV_MIGRATION_PORT: 1234})
    self.ExecOpCodeExpectOpPrereqError(
      op, "hypervisor parameters are global and cannot be customized")

  def testHvparams(self):
    """A per-instance hypervisor parameter can be modified."""
    op = self.CopyOpCode(self.op,
                         hvparams={constants.HV_BOOT_ORDER: "cd"})
    self.ExecOpCode(op)

  def testDisksAndDiskTemplate(self):
    """Disk template conversion and other disk changes are exclusive."""
    op = self.CopyOpCode(self.op,
                         disk_template=constants.DT_PLAIN,
                         disks=[[constants.DDM_ADD, -1, {}]])
    self.ExecOpCodeExpectOpPrereqError(
      op, "Disk template conversion and other disk changes not supported at"
          " the same time")

  def testDiskTemplateToMirroredNoRemoteNode(self):
    """Converting to a mirrored template requires a secondary node."""
    op = self.CopyOpCode(self.op,
                         disk_template=constants.DT_DRBD8)
    self.ExecOpCodeExpectOpPrereqError(
      op, "Changing the disk template to a mirrored one requires specifying"
          " a secondary node")

  def testPrimaryNodeToOldPrimaryNode(self):
    """Setting pnode to the current primary is a no-op that succeeds."""
    op = self.CopyOpCode(self.op,
                         pnode=self.master.name)
    self.ExecOpCode(op)

  def testPrimaryNodeChange(self):
    """The primary node of a stopped instance can be changed."""
    node = self.cfg.AddNewNode()
    op = self.CopyOpCode(self.op,
                         pnode=node.name)
    self.ExecOpCode(op)

  def testPrimaryNodeChangeRunningInstance(self):
    """The primary node of a running instance cannot be changed."""
    node = self.cfg.AddNewNode()
    op = self.CopyOpCode(self.running_op,
                         pnode=node.name)
    self.ExecOpCodeExpectOpPrereqError(op, "Instance is still running")

  def testOsChange(self):
    """The instance OS can be changed to another known OS."""
    os = self.cfg.CreateOs(supported_variants=[])
    self.rpc.call_os_get.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, os)
    op = self.CopyOpCode(self.op,
                         os_name=os.name)
    self.ExecOpCode(op)

  def testVCpuChange(self):
    """The number of VCPUs can be modified."""
    op = self.CopyOpCode(self.op,
                         beparams={
                           constants.BE_VCPUS: 4
                         })
    self.ExecOpCode(op)

  def testWrongCpuMask(self):
    """A CPU mask whose entry count differs from the VCPUs is rejected."""
    op = self.CopyOpCode(self.op,
                         beparams={
                           constants.BE_VCPUS: 4
                         },
                         hvparams={
                           constants.HV_CPU_MASK: "1,2:3,4"
                         })
    self.ExecOpCodeExpectOpPrereqError(
      op, "Number of vCPUs .* does not match the CPU mask .*")

  def testCorrectCpuMask(self):
    """A CPU mask matching the VCPU count is accepted."""
    op = self.CopyOpCode(self.op,
                         beparams={
                           constants.BE_VCPUS: 4
                         },
                         hvparams={
                           constants.HV_CPU_MASK: "1,2:3,4:all:1,4"
                         })
    self.ExecOpCode(op)

  def testOsParams(self):
    """A supported OS parameter can be set."""
    op = self.CopyOpCode(self.op,
                         osparams={
                           self.os.supported_parameters[0]: "test_param_val"
                         })
    self.ExecOpCode(op)

  def testIncreaseMemoryTooMuch(self):
    """Raising maxmem above the node's free memory is rejected."""
    op = self.CopyOpCode(self.running_op,
                         beparams={
                           constants.BE_MAXMEM:
                             self.mocked_master_memory_free * 2
                         })
    self.ExecOpCodeExpectOpPrereqError(
      op, "This change will prevent the instance from starting")

  def testIncreaseMemory(self):
    """Raising maxmem up to the node's free memory is accepted."""
    op = self.CopyOpCode(self.running_op,
                         beparams={
                           constants.BE_MAXMEM: self.mocked_master_memory_free
                         })
    self.ExecOpCode(op)

  def testIncreaseMemoryTooMuchForSecondary(self):
    """Raising maxmem beyond the secondary's capacity breaks failover."""
    inst = self.cfg.AddNewInstance(admin_state=constants.ADMINST_UP,
                                   disk_template=constants.DT_DRBD8,
                                   secondary_node=self.snode)
    self.rpc.call_instance_info.side_effect = [
      self.RpcResultsBuilder()
        .CreateSuccessfulNodeResult(self.master,
                                    {
                                      "memory":
                                        self.mocked_snode_memory_free * 2,
                                      "vcpus": self.mocked_running_inst_vcpus,
                                      "state": self.mocked_running_inst_state,
                                      "time": self.mocked_running_inst_time
                                    })]

    op = self.CopyOpCode(self.op,
                         instance_name=inst.name,
                         beparams={
                           constants.BE_MAXMEM:
                             self.mocked_snode_memory_free * 2,
                           constants.BE_AUTO_BALANCE: True
                         })
    self.ExecOpCodeExpectOpPrereqError(
      op, "This change will prevent the instance from failover to its"
          " secondary node")

  def testInvalidRuntimeMemory(self):
    """Runtime memory outside the instance's memory range is rejected."""
    op = self.CopyOpCode(self.running_op,
                         runtime_mem=self.mocked_master_memory_free * 2)
    self.ExecOpCodeExpectOpPrereqError(
      op, "Instance .* must have memory between .* and .* of memory")

  def testIncreaseRuntimeMemory(self):
    """Runtime memory can be raised together with maxmem."""
    op = self.CopyOpCode(self.running_op,
                         runtime_mem=self.mocked_master_memory_free,
                         beparams={
                           constants.BE_MAXMEM: self.mocked_master_memory_free
                         })
    self.ExecOpCode(op)

  def testAddNicWithPoolIpNoNetwork(self):
    """ip=pool requires a network to allocate the address from."""
    op = self.CopyOpCode(self.op,
                         nics=[(constants.DDM_ADD, -1,
                                {
                                  constants.INIC_IP: constants.NIC_IP_POOL
                                })])
    self.ExecOpCodeExpectOpPrereqError(
      op, "If ip=pool, parameter network cannot be none")

  def testAddNicWithPoolIp(self):
    """A NIC with ip=pool gets its address from the given network."""
    net = self.cfg.AddNewNetwork()
    self.cfg.ConnectNetworkToGroup(net, self.group)
    op = self.CopyOpCode(self.op,
                         nics=[(constants.DDM_ADD, -1,
                                {
                                  constants.INIC_IP: constants.NIC_IP_POOL,
                                  constants.INIC_NETWORK: net.name
                                })])
    self.ExecOpCode(op)

  def testAddNicWithInvalidIp(self):
    """A syntactically invalid IP address is rejected."""
    op = self.CopyOpCode(self.op,
                         nics=[(constants.DDM_ADD, -1,
                                {
                                  constants.INIC_IP: "invalid"
                                })])
    self.ExecOpCodeExpectOpPrereqError(
      op, "Invalid IP address")

  def testAddNic(self):
    """A NIC with default parameters can be added."""
    op = self.CopyOpCode(self.op,
                         nics=[(constants.DDM_ADD, -1, {})])
    self.ExecOpCode(op)

  def testNoHotplugSupport(self):
    """hotplug=True fails when the node reports no hotplug support."""
    op = self.CopyOpCode(self.op,
                         nics=[(constants.DDM_ADD, -1, {})],
                         hotplug=True)
    self.rpc.call_hotplug_supported.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)
    self.ExecOpCodeExpectOpPrereqError(op, "Hotplug is not possible")
    self.assertTrue(self.rpc.call_hotplug_supported.called)

  def testHotplugIfPossible(self):
    """hotplug_if_possible silently skips hotplug when unsupported."""
    op = self.CopyOpCode(self.op,
                         nics=[(constants.DDM_ADD, -1, {})],
                         hotplug_if_possible=True)
    self.rpc.call_hotplug_supported.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)
    self.ExecOpCode(op)
    self.assertTrue(self.rpc.call_hotplug_supported.called)
    self.assertFalse(self.rpc.call_hotplug_device.called)

  def testHotAddNic(self):
    """A new NIC is hotplugged when the node supports it."""
    op = self.CopyOpCode(self.op,
                         nics=[(constants.DDM_ADD, -1, {})],
                         hotplug=True)
    self.rpc.call_hotplug_supported.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master)
    self.ExecOpCode(op)
    self.assertTrue(self.rpc.call_hotplug_supported.called)
    self.assertTrue(self.rpc.call_hotplug_device.called)

  def testAddNicWithIp(self):
    """A NIC with an explicit IP address can be added."""
    op = self.CopyOpCode(self.op,
                         nics=[(constants.DDM_ADD, -1,
                                {
                                  constants.INIC_IP: "2.3.1.4"
                                })])
    self.ExecOpCode(op)

  def testModifyNicRoutedWithoutIp(self):
    """Switching a NIC to routed mode without an IP is rejected."""
    op = self.CopyOpCode(self.op,
                         nics=[(constants.DDM_MODIFY, 0,
                                {
                                  constants.INIC_MODE: constants.NIC_MODE_ROUTED
                                })])
    self.ExecOpCodeExpectOpPrereqError(
      op, "Cannot set the NIC IP address to None on a routed NIC")

  def testModifyNicSetMac(self):
    """The MAC address of a NIC can be changed."""
    op = self.CopyOpCode(self.op,
                         nics=[(constants.DDM_MODIFY, 0,
                                {
                                  constants.INIC_MAC: "0a:12:95:15:bf:75"
                                })])
    self.ExecOpCode(op)

  def testModifyNicWithPoolIpNoNetwork(self):
    """ip=pool on modify requires the NIC to be on a network."""
    op = self.CopyOpCode(self.op,
                         nics=[(constants.DDM_MODIFY, -1,
                                {
                                  constants.INIC_IP: constants.NIC_IP_POOL
                                })])
    self.ExecOpCodeExpectOpPrereqError(
      op, "ip=pool, but no network found")

  def testModifyNicSetNet(self):
    """A NIC can be moved from one connected network to another."""
    old_net = self.cfg.AddNewNetwork()
    self.cfg.ConnectNetworkToGroup(old_net, self.group)
    inst = self.cfg.AddNewInstance(nics=[
      self.cfg.CreateNic(network=old_net,
                         ip="198.51.100.2")])

    new_net = self.cfg.AddNewNetwork(mac_prefix="be")
    self.cfg.ConnectNetworkToGroup(new_net, self.group)
    op = self.CopyOpCode(self.op,
                         instance_name=inst.name,
                         nics=[(constants.DDM_MODIFY, 0,
                                {
                                  constants.INIC_NETWORK: new_net.name
                                })])
    self.ExecOpCode(op)

  def testModifyNicSetLinkWhileConnected(self):
    """Changing the link of a network-connected NIC is rejected."""
    old_net = self.cfg.AddNewNetwork()
    self.cfg.ConnectNetworkToGroup(old_net, self.group)
    inst = self.cfg.AddNewInstance(nics=[
      self.cfg.CreateNic(network=old_net)])

    op = self.CopyOpCode(self.op,
                         instance_name=inst.name,
                         nics=[(constants.DDM_MODIFY, 0,
                                {
                                  constants.INIC_LINK: "mock_link"
                                })])
    self.ExecOpCodeExpectOpPrereqError(
      op, "Not allowed to change link or mode of a NIC that is connected"
          " to a network")

  def testModifyNicSetNetAndIp(self):
    """Network and a matching static IP can be set together."""
    net = self.cfg.AddNewNetwork(mac_prefix="be", network="123.123.123.0/24")
    self.cfg.ConnectNetworkToGroup(net, self.group)
    op = self.CopyOpCode(self.op,
                         nics=[(constants.DDM_MODIFY, 0,
                                {
                                  constants.INIC_NETWORK: net.name,
                                  constants.INIC_IP: "123.123.123.1"
                                })])
    self.ExecOpCode(op)

  def testModifyNic(self):
    """An empty NIC modification succeeds."""
    op = self.CopyOpCode(self.op,
                         nics=[(constants.DDM_MODIFY, 0, {})])
    self.ExecOpCode(op)

  def testHotModifyNic(self):
    """A NIC modification is hotplugged when supported."""
    op = self.CopyOpCode(self.op,
                         nics=[(constants.DDM_MODIFY, 0, {})],
                         hotplug=True)
    self.rpc.call_hotplug_supported.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master)
    self.ExecOpCode(op)
    self.assertTrue(self.rpc.call_hotplug_supported.called)
    self.assertTrue(self.rpc.call_hotplug_device.called)

  def testRemoveLastNic(self):
    """Removing the only NIC violates the instance policy."""
    op = self.CopyOpCode(self.op,
                         nics=[(constants.DDM_REMOVE, 0, {})])
    self.ExecOpCodeExpectOpPrereqError(
      op, "violates policy")

  def testRemoveNic(self):
    """A NIC can be removed if at least one remains."""
    inst = self.cfg.AddNewInstance(nics=[self.cfg.CreateNic(),
                                         self.cfg.CreateNic()])
    op = self.CopyOpCode(self.op,
                         instance_name=inst.name,
                         nics=[(constants.DDM_REMOVE, 0, {})])
    self.ExecOpCode(op)

  def testHotRemoveNic(self):
    """NIC removal is hotplugged when supported."""
    inst = self.cfg.AddNewInstance(nics=[self.cfg.CreateNic(),
                                         self.cfg.CreateNic()])
    op = self.CopyOpCode(self.op,
                         instance_name=inst.name,
                         nics=[(constants.DDM_REMOVE, 0, {})],
                         hotplug=True)
    self.rpc.call_hotplug_supported.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master)
    self.ExecOpCode(op)
    self.assertTrue(self.rpc.call_hotplug_supported.called)
    self.assertTrue(self.rpc.call_hotplug_device.called)

  def testSetOffline(self):
    """An instance can be marked offline."""
    op = self.CopyOpCode(self.op,
                         offline=True)
    self.ExecOpCode(op)

  def testUnsetOffline(self):
    """An instance can be marked online again."""
    op = self.CopyOpCode(self.op,
                         offline=False)
    self.ExecOpCode(op)

  def testAddDiskInvalidMode(self):
    """A disk with an unknown access mode is rejected."""
    op = self.CopyOpCode(self.op,
                         disks=[[constants.DDM_ADD, -1,
                                 {
                                   constants.IDISK_MODE: "invalid"
                                 }]])
    self.ExecOpCodeExpectOpPrereqError(
      op, "Invalid disk access mode 'invalid'")

  def testAddDiskMissingSize(self):
    """Adding a disk requires the size parameter."""
    op = self.CopyOpCode(self.op,
                         disks=[[constants.DDM_ADD, -1, {}]])
    self.ExecOpCodeExpectOpPrereqError(
      op, "Required disk parameter 'size' missing")

  def testAddDiskInvalidSize(self):
    """A non-numeric disk size fails type enforcement."""
    op = self.CopyOpCode(self.op,
                         disks=[[constants.DDM_ADD, -1,
                                 {
                                   constants.IDISK_SIZE: "invalid"
                                 }]])
    self.ExecOpCodeExpectException(
      op, errors.TypeEnforcementError, "is not a valid size")

  def testAddDiskRunningInstanceNoWaitForSync(self):
    """--no-wait-for-sync cannot be combined with adding a disk online."""
    op = self.CopyOpCode(self.running_op,
                         disks=[[constants.DDM_ADD, -1,
                                 {
                                   constants.IDISK_SIZE: 1024
                                 }]],
                         wait_for_sync=False)
    self.ExecOpCodeExpectOpPrereqError(
      op, "Can't add a disk to an instance with activated disks"
          " and --no-wait-for-sync given.")

  def testAddDiskDownInstance(self):
    """Adding a disk to a stopped instance shuts the blockdev down again."""
    op = self.CopyOpCode(self.op,
                         disks=[[constants.DDM_ADD, -1,
                                 {
                                   constants.IDISK_SIZE: 1024
                                 }]])
    self.ExecOpCode(op)

    self.assertTrue(self.rpc.call_blockdev_shutdown.called)

  def testAddDiskRunningInstance(self):
    """Adding a disk to a running instance keeps the blockdev active."""
    op = self.CopyOpCode(self.running_op,
                         disks=[[constants.DDM_ADD, -1,
                                 {
                                   constants.IDISK_SIZE: 1024
                                 }]])
    self.ExecOpCode(op)

    self.assertFalse(self.rpc.call_blockdev_shutdown.called)

  def testAddDiskNoneName(self):
    """Setting the name to the literal 'none' is accepted on add."""
    op = self.CopyOpCode(self.op,
                         disks=[[constants.DDM_ADD, -1,
                                 {
                                   constants.IDISK_SIZE: 1024,
                                   constants.IDISK_NAME: constants.VALUE_NONE
                                 }]])
    self.ExecOpCode(op)

  def testHotAddDisk(self):
    """A new disk is created, assembled and hotplugged."""
    self.rpc.call_blockdev_assemble.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, ("/dev/mocked_path",
                                    "/var/run/ganeti/instance-disks/mocked_d"))
    op = self.CopyOpCode(self.op,
                         disks=[[constants.DDM_ADD, -1,
                                 {
                                   constants.IDISK_SIZE: 1024,
                                 }]],
                         hotplug=True)
    self.rpc.call_hotplug_supported.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master)
    self.ExecOpCode(op)
    self.assertTrue(self.rpc.call_hotplug_supported.called)
    self.assertTrue(self.rpc.call_blockdev_create.called)
    self.assertTrue(self.rpc.call_blockdev_assemble.called)
    self.assertTrue(self.rpc.call_hotplug_device.called)

  def testHotRemoveDisk(self):
    """A removed disk is unplugged, shut down and deleted."""
    inst = self.cfg.AddNewInstance(disks=[self.cfg.CreateDisk(),
                                          self.cfg.CreateDisk()])
    op = self.CopyOpCode(self.op,
                         instance_name=inst.name,
                         disks=[[constants.DDM_REMOVE, -1,
                                 {}]],
                         hotplug=True)
    self.rpc.call_hotplug_supported.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master)
    self.ExecOpCode(op)
    self.assertTrue(self.rpc.call_hotplug_supported.called)
    self.assertTrue(self.rpc.call_hotplug_device.called)
    self.assertTrue(self.rpc.call_blockdev_shutdown.called)
    self.assertTrue(self.rpc.call_blockdev_remove.called)

  def testModifyDiskWithSize(self):
    """Changing a disk's size via modify is rejected (use grow-disk)."""
    op = self.CopyOpCode(self.op,
                         disks=[[constants.DDM_MODIFY, 0,
                                 {
                                   constants.IDISK_SIZE: 1024
                                 }]])
    self.ExecOpCodeExpectOpPrereqError(
      op, "Disk size change not possible, use grow-disk")

  def testModifyDiskWithRandomParams(self):
    """Parameters not valid for modify fail type enforcement."""
    op = self.CopyOpCode(self.op,
                         disks=[[constants.DDM_MODIFY, 0,
                                 {
                                   constants.IDISK_METAVG: "new_meta_vg",
                                   constants.IDISK_MODE: "invalid",
                                   constants.IDISK_NAME: "new_name"
                                 }]])
    self.ExecOpCodeExpectException(op, errors.TypeEnforcementError,
                                   "Unknown parameter 'metavg'")

  def testModifyDiskUnsetName(self):
    """A disk's name can be unset via the literal 'none' value."""
    op = self.CopyOpCode(self.op,
                         disks=[[constants.DDM_MODIFY, 0,
                                 {
                                   constants.IDISK_NAME: constants.VALUE_NONE
                                 }]])
    self.ExecOpCode(op)

  def testSetOldDiskTemplate(self):
    """Converting to the template already in use is rejected."""
    op = self.CopyOpCode(self.op,
                         disk_template=self.inst.disk_template)
    self.ExecOpCodeExpectOpPrereqError(
      op, "Instance already has disk template")

  def testSetDisabledDiskTemplate(self):
    """Converting to a template disabled on the cluster is rejected."""
    self.cfg.SetEnabledDiskTemplates([self.inst.disk_template])
    op = self.CopyOpCode(self.op,
                         disk_template=constants.DT_EXT)
    self.ExecOpCodeExpectOpPrereqError(
      op, "Disk template .* is not enabled for this cluster")

  def testInvalidDiskTemplateConversion(self):
    """Unsupported disk template conversions are rejected."""
    op = self.CopyOpCode(self.op,
                         disk_template=constants.DT_EXT)
    self.ExecOpCodeExpectOpPrereqError(
      op, "Unsupported disk template conversion from .* to .*")

  def testConvertToDRBDWithSecondarySameAsPrimary(self):
    """The DRBD secondary must differ from the primary node."""
    op = self.CopyOpCode(self.op,
                         disk_template=constants.DT_DRBD8,
                         remote_node=self.master.name)
    self.ExecOpCodeExpectOpPrereqError(
      op, "Given new secondary node .* is the same as the primary node"
          " of the instance")

  def testConvertPlainToDRBD(self):
    """A plain instance can be converted to DRBD8."""
    self.rpc.call_blockdev_shutdown.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, True)
    self.rpc.call_blockdev_getmirrorstatus.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, [objects.BlockDevStatus()])

    op = self.CopyOpCode(self.op,
                         disk_template=constants.DT_DRBD8,
                         remote_node=self.snode.name)
    self.ExecOpCode(op)

  def testConvertDRBDToPlain(self):
    """A DRBD8 instance can be converted back to plain."""
    self.inst.disks = [self.cfg.CreateDisk(dev_type=constants.DT_DRBD8,
                                           primary_node=self.master,
                                           secondary_node=self.snode)]
    self.inst.disk_template = constants.DT_DRBD8
    self.rpc.call_blockdev_shutdown.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, True)
    self.rpc.call_blockdev_remove.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master)
    self.rpc.call_blockdev_getmirrorstatus.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, [objects.BlockDevStatus()])

    op = self.CopyOpCode(self.op,
                         disk_template=constants.DT_PLAIN)
    self.ExecOpCode(op)
|
2351 |
|
2352 |
class TestLUInstanceChangeGroup(CmdlibTestCase):
  """Tests for LUInstanceChangeGroup.

  setUp creates a second node group with one node (self.group2/self.node2)
  next to the default group, plus an instance in the default group.
  """

  def setUp(self):
    super(TestLUInstanceChangeGroup, self).setUp()

    self.group2 = self.cfg.AddNewNodeGroup()
    self.node2 = self.cfg.AddNewNode(group=self.group2)
    self.inst = self.cfg.AddNewInstance()
    self.op = opcodes.OpInstanceChangeGroup(instance_name=self.inst.name)

  def testTargetGroupIsInstanceGroup(self):
    """The group the instance already uses is not a valid target."""
    op = self.CopyOpCode(self.op,
                         target_groups=[self.group.name])
    # Raw strings: the expected message is matched as a regex and contains
    # escaped parentheses; non-raw "\(" is an invalid escape sequence
    # (SyntaxWarning on Python >= 3.12).
    self.ExecOpCodeExpectOpPrereqError(
      op, r"Can't use group\(s\) .* as targets, they are used by the"
          r" instance .*")

  def testNoTargetGroups(self):
    """An instance spanning all groups leaves no group to move to."""
    inst = self.cfg.AddNewInstance(disk_template=constants.DT_DRBD8,
                                   primary_node=self.master,
                                   secondary_node=self.node2)
    op = self.CopyOpCode(self.op,
                         instance_name=inst.name)
    self.ExecOpCodeExpectOpPrereqError(
      op, "There are no possible target groups")

  def testFailingIAllocator(self):
    """An unsuccessful iallocator run aborts the operation."""
    self.iallocator_cls.return_value.success = False
    op = self.CopyOpCode(self.op)

    self.ExecOpCodeExpectOpPrereqError(
      op, "Can't compute solution for changing group of instance .*"
          " using iallocator .*")

  def testChangeGroup(self):
    """A successful iallocator result lets the group change proceed."""
    self.iallocator_cls.return_value.success = True
    self.iallocator_cls.return_value.result = ([], [], [])
    op = self.CopyOpCode(self.op)

    self.ExecOpCode(op)
2392 |
|
2393 |
if __name__ == "__main__":
  # Run all test cases in this module through Ganeti's test runner.
  testutils.GanetiTestProgram()