root / test / py / cmdlib / cluster_unittest.py @ 9b9e088c
History | View | Annotate | Download (73.2 kB)
1 |
#!/usr/bin/python
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2008, 2011, 2012, 2013 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
|
22 |
"""Tests for LUCluster*
|
23 |
|
24 |
"""
|
25 |
|
26 |
import OpenSSL |
27 |
|
28 |
import unittest |
29 |
import operator |
30 |
import os |
31 |
import tempfile |
32 |
import shutil |
33 |
|
34 |
from ganeti import constants |
35 |
from ganeti import errors |
36 |
from ganeti import netutils |
37 |
from ganeti import objects |
38 |
from ganeti import opcodes |
39 |
from ganeti import utils |
40 |
from ganeti import pathutils |
41 |
from ganeti import query |
42 |
from ganeti.cmdlib import cluster |
43 |
from ganeti.hypervisor import hv_xen |
44 |
|
45 |
from testsupport import * |
46 |
|
47 |
import testutils |
48 |
|
49 |
|
50 |
class TestCertVerification(testutils.GanetiTestCase):
  """Exercises cluster._VerifyCertificate on good, missing and bogus files."""

  def setUp(self):
    testutils.GanetiTestCase.setUp(self)

    self.tmpdir = tempfile.mkdtemp()

  def tearDown(self):
    shutil.rmtree(self.tmpdir)

  def testVerifyCertificate(self):
    # A known-good certificate must verify without raising
    cluster._VerifyCertificate(testutils.TestDataFilename("cert1.pem"))

    # A path that does not exist yields an error code
    missing_path = os.path.join(self.tmpdir, "does-not-exist")
    (errcode, _) = cluster._VerifyCertificate(missing_path)
    self.assertEqual(errcode, cluster.LUClusterVerifyConfig.ETYPE_ERROR)

    # Try to load non-certificate file
    non_cert_path = testutils.TestDataFilename("bdev-net.txt")
    (errcode, _) = cluster._VerifyCertificate(non_cert_path)
    self.assertEqual(errcode, cluster.LUClusterVerifyConfig.ETYPE_ERROR)
|
71 |
|
72 |
|
73 |
class TestClusterVerifySsh(unittest.TestCase):
  """Tests for LUClusterVerifyGroup._SelectSshCheckNodes."""

  def testMultipleGroups(self):
    select_fn = cluster.LUClusterVerifyGroup._SelectSshCheckNodes
    # Nodes 20..25 online, node26 offline, all in group "my"
    mygroupnodes = [
      objects.Node(name="node%d" % i, group="my", offline=(i == 26))
      for i in range(20, 27)
      ]
    nodes = [
      objects.Node(name="node1", group="g1", offline=True),
      objects.Node(name="node2", group="g1", offline=False),
      objects.Node(name="node3", group="g1", offline=False),
      objects.Node(name="node4", group="g1", offline=True),
      objects.Node(name="node5", group="g1", offline=False),
      objects.Node(name="node10", group="xyz", offline=False),
      objects.Node(name="node11", group="xyz", offline=False),
      objects.Node(name="node40", group="alloff", offline=True),
      objects.Node(name="node41", group="alloff", offline=True),
      objects.Node(name="node50", group="aaa", offline=False),
      ] + mygroupnodes
    assert not utils.FindDuplicates(map(operator.attrgetter("name"), nodes))

    (online, perhost) = select_fn(mygroupnodes, "my", nodes)
    self.assertEqual(online, ["node%s" % i for i in range(20, 26)])
    self.assertEqual(set(perhost.keys()), set(online))

    # Each online node checks one peer from every other group with an
    # online member (group "alloff" contributes nothing)
    self.assertEqual(perhost, {
      "node20": ["node10", "node2", "node50"],
      "node21": ["node11", "node3", "node50"],
      "node22": ["node10", "node5", "node50"],
      "node23": ["node11", "node2", "node50"],
      "node24": ["node10", "node3", "node50"],
      "node25": ["node11", "node5", "node50"],
      })

  def testSingleGroup(self):
    select_fn = cluster.LUClusterVerifyGroup._SelectSshCheckNodes
    nodes = [
      objects.Node(name="node1", group="default", offline=True),
      objects.Node(name="node2", group="default", offline=False),
      objects.Node(name="node3", group="default", offline=False),
      objects.Node(name="node4", group="default", offline=True),
      ]
    assert not utils.FindDuplicates(map(operator.attrgetter("name"), nodes))

    (online, perhost) = select_fn(nodes, "default", nodes)
    self.assertEqual(online, ["node2", "node3"])
    self.assertEqual(set(perhost.keys()), set(online))

    # With a single group there are no cross-group peers to check
    self.assertEqual(perhost, {
      "node2": [],
      "node3": [],
      })
130 |
|
131 |
|
132 |
class TestLUClusterActivateMasterIp(CmdlibTestCase):
  """Tests for LUClusterActivateMasterIp."""

  def testSuccess(self):
    """A successful RPC reply lets the opcode complete."""
    op = opcodes.OpClusterActivateMasterIp()

    self.rpc.call_node_activate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master)

    self.ExecOpCode(op)

    # The IP is activated on the master node; the final False is the
    # use_external_mip_script flag
    self.rpc.call_node_activate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)

  def testFailure(self):
    """A failed RPC reply surfaces as an OpExecError."""
    op = opcodes.OpClusterActivateMasterIp()

    # Fixed: the original carried a stray trailing line-continuation
    # backslash after this call chain
    self.rpc.call_node_activate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)

    self.ExecOpCodeExpectOpExecError(op)
|
153 |
|
154 |
|
155 |
class TestLUClusterDeactivateMasterIp(CmdlibTestCase):
  """Tests for LUClusterDeactivateMasterIp."""

  def testSuccess(self):
    """A successful RPC reply lets the opcode complete."""
    op = opcodes.OpClusterDeactivateMasterIp()

    self.rpc.call_node_deactivate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master)

    self.ExecOpCode(op)

    # The IP is deactivated on the master node; the final False is the
    # use_external_mip_script flag
    self.rpc.call_node_deactivate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)

  def testFailure(self):
    """A failed RPC reply surfaces as an OpExecError."""
    op = opcodes.OpClusterDeactivateMasterIp()

    # Fixed: the original carried a stray trailing line-continuation
    # backslash after this call chain
    self.rpc.call_node_deactivate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)

    self.ExecOpCodeExpectOpExecError(op)
|
176 |
|
177 |
|
178 |
class TestLUClusterConfigQuery(CmdlibTestCase):
  """Tests for LUClusterConfigQuery."""

  def testInvalidField(self):
    """Unknown output fields are rejected during CheckPrereq."""
    op = opcodes.OpClusterConfigQuery(output_fields=["pinky_bunny"])

    self.ExecOpCodeExpectOpPrereqError(op, "pinky_bunny")

  def testAllFields(self):
    """Requesting every known field returns one value per field."""
    # list() keeps this working on Python 3, where dict.keys() is a view
    op = opcodes.OpClusterConfigQuery(
      output_fields=list(query.CLUSTER_FIELDS.keys()))

    self.rpc.call_get_watcher_pause.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, -1)

    ret = self.ExecOpCode(op)

    # The watcher-pause RPC runs exactly once, even with all fields
    self.assertEqual(1, self.rpc.call_get_watcher_pause.call_count)
    self.assertEqual(len(ret), len(query.CLUSTER_FIELDS))

  def testEmptyFields(self):
    """An empty field list triggers no RPC at all."""
    # Renamed from the original's misspelled "testEmpytFields"
    op = opcodes.OpClusterConfigQuery(output_fields=[])

    self.ExecOpCode(op)

    self.assertFalse(self.rpc.call_get_watcher_pause.called)
202 |
|
203 |
|
204 |
class TestLUClusterDestroy(CmdlibTestCase):
  """Tests for LUClusterDestroy."""

  def testExistingNodes(self):
    """Destroying is refused while non-master nodes remain."""
    op = opcodes.OpClusterDestroy()

    self.cfg.AddNewNode()
    self.cfg.AddNewNode()

    # Raw string: "\(" is a regex escape, not a valid string escape
    # (non-raw "\(" raises SyntaxWarning on modern Python)
    self.ExecOpCodeExpectOpPrereqError(op, r"still 2 node\(s\)")

  def testExistingInstances(self):
    """Destroying is refused while instances remain."""
    op = opcodes.OpClusterDestroy()

    self.cfg.AddNewInstance()
    self.cfg.AddNewInstance()

    self.ExecOpCodeExpectOpPrereqError(op, r"still 2 instance\(s\)")

  def testEmptyCluster(self):
    """An empty cluster is destroyed and the post-phase hook runs."""
    op = opcodes.OpClusterDestroy()

    self.ExecOpCode(op)

    self.assertSingleHooksCall([self.master.name],
                               "cluster-destroy",
                               constants.HOOKS_PHASE_POST)
229 |
|
230 |
|
231 |
class TestLUClusterPostInit(CmdlibTestCase):
  """Tests for LUClusterPostInit."""

  def testExecution(self):
    """The post-init opcode fires the cluster-init post hook."""
    # Renamed from the original's misspelled "testExecuion"; unittest
    # discovers tests by the "test" prefix, so no caller is affected
    op = opcodes.OpClusterPostInit()

    self.ExecOpCode(op)

    self.assertSingleHooksCall([self.master.name],
                               "cluster-init",
                               constants.HOOKS_PHASE_POST)
240 |
|
241 |
|
242 |
class TestLUClusterQuery(CmdlibTestCase):
  """Tests for LUClusterQuery."""

  def testSimpleInvocation(self):
    # A plain query against the default (IPv4) cluster succeeds
    self.ExecOpCode(opcodes.OpClusterQuery())

  def testIPv6Cluster(self):
    # The query also succeeds when the cluster runs on IPv6
    self.cluster.primary_ip_family = netutils.IP6Address.family

    self.ExecOpCode(opcodes.OpClusterQuery())
|
254 |
|
255 |
|
256 |
class TestLUClusterRedistConf(CmdlibTestCase):
  """Tests for LUClusterRedistConf."""

  def testSimpleInvocation(self):
    # Redistributing the configuration must simply succeed
    self.ExecOpCode(opcodes.OpClusterRedistConf())
|
261 |
|
262 |
|
263 |
class TestLUClusterRename(CmdlibTestCase):
  """Tests for LUClusterRename."""

  NEW_NAME = "new-name.example.com"
  NEW_IP = "203.0.113.100"

  def testNoChanges(self):
    # Renaming to the current name is refused
    op = opcodes.OpClusterRename(name=self.cfg.GetClusterName())

    self.ExecOpCodeExpectOpPrereqError(op, "name nor the IP address")

  def testReachableIp(self):
    # The new IP must not already answer on the network
    self.netutils_mod.GetHostname.return_value = \
      HostnameMock(self.NEW_NAME, self.NEW_IP)
    self.netutils_mod.TcpPing.return_value = True

    op = opcodes.OpClusterRename(name=self.NEW_NAME)
    self.ExecOpCodeExpectOpPrereqError(op, "is reachable on the network")

  def testValidRename(self):
    self.netutils_mod.GetHostname.return_value = \
      HostnameMock(self.NEW_NAME, self.NEW_IP)

    self.ExecOpCode(opcodes.OpClusterRename(name=self.NEW_NAME))

    # Known hosts get rewritten and the master IP is bounced
    self.assertEqual(1, self.ssh_mod.WriteKnownHostsFile.call_count)
    self.rpc.call_node_deactivate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)
    self.rpc.call_node_activate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)

  def testRenameOfflineMaster(self):
    # Renaming still works with the master node marked offline
    self.master.offline = True
    self.netutils_mod.GetHostname.return_value = \
      HostnameMock(self.NEW_NAME, self.NEW_IP)

    self.ExecOpCode(opcodes.OpClusterRename(name=self.NEW_NAME))
|
303 |
|
304 |
|
305 |
class TestLUClusterRepairDiskSizes(CmdlibTestCase):
  """Tests for LUClusterRepairDiskSizes."""

  # Disk size (in bytes) matching the mock config's recorded size
  _GIB = 1024 * 1024 * 1024

  def testNoInstances(self):
    # Repairing an instance-less cluster is a no-op that succeeds
    self.ExecOpCode(opcodes.OpClusterRepairDiskSizes())

  def _SetUpInstanceSingleDisk(self, dev_type=constants.DT_PLAIN):
    # One instance with a single disk spanning the master and a new node
    primary = self.master
    secondary = self.cfg.AddNewNode()

    disk = self.cfg.CreateDisk(dev_type=dev_type,
                               primary_node=primary,
                               secondary_node=secondary)
    inst = self.cfg.AddNewInstance(disks=[disk])

    return (inst, disk)

  def testSingleInstanceOnFailingNode(self):
    (inst, _) = self._SetUpInstanceSingleDisk()

    self.rpc.call_blockdev_getdimensions.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)

    self.ExecOpCode(opcodes.OpClusterRepairDiskSizes(instances=[inst.name]))

    self.mcpu.assertLogContainsRegex("Failure in blockdev_getdimensions")

  def _ExecOpClusterRepairDiskSizes(self, node_data):
    # not specifying instances repairs all
    self.rpc.call_blockdev_getdimensions.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, node_data)

    return self.ExecOpCode(opcodes.OpClusterRepairDiskSizes())

  def testInvalidResultData(self):
    # Malformed per-disk tuples are logged and skipped, not fatal
    for data in ([], [None], ["invalid"], [("still", "invalid")]):
      self.ResetMocks()

      self._SetUpInstanceSingleDisk()
      self._ExecOpClusterRepairDiskSizes(data)

      self.mcpu.assertLogContainsRegex("ignoring")

  def testCorrectSize(self):
    self._SetUpInstanceSingleDisk()
    changed = self._ExecOpClusterRepairDiskSizes([(self._GIB, None)])
    self.mcpu.assertLogIsEmpty()
    self.assertEqual(0, len(changed))

  def testWrongSize(self):
    self._SetUpInstanceSingleDisk()
    changed = self._ExecOpClusterRepairDiskSizes([(512 * 1024 * 1024, None)])
    self.assertEqual(1, len(changed))

  def testCorrectDRBD(self):
    self._SetUpInstanceSingleDisk(dev_type=constants.DT_DRBD8)
    changed = self._ExecOpClusterRepairDiskSizes([(self._GIB, None)])
    self.mcpu.assertLogIsEmpty()
    self.assertEqual(0, len(changed))

  def testWrongDRBDChild(self):
    # A mismatch in a DRBD child device also counts as a repair
    (_, disk) = self._SetUpInstanceSingleDisk(dev_type=constants.DT_DRBD8)
    disk.children[0].size = 512
    changed = self._ExecOpClusterRepairDiskSizes([(self._GIB, None)])
    self.assertEqual(1, len(changed))

  def testExclusiveStorageInvalidResultData(self):
    # With exclusive storage, missing spindle data is reported
    self._SetUpInstanceSingleDisk()
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    self._ExecOpClusterRepairDiskSizes([(self._GIB, None)])

    self.mcpu.assertLogContainsRegex(
      "did not return valid spindles information")

  def testExclusiveStorageCorrectSpindles(self):
    (_, disk) = self._SetUpInstanceSingleDisk()
    disk.spindles = 1
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    changed = self._ExecOpClusterRepairDiskSizes([(self._GIB, 1)])
    self.assertEqual(0, len(changed))

  def testExclusiveStorageWrongSpindles(self):
    # Config has no spindles recorded, so the reported value differs
    self._SetUpInstanceSingleDisk()
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    changed = self._ExecOpClusterRepairDiskSizes([(self._GIB, 1)])
    self.assertEqual(1, len(changed))
396 |
|
397 |
|
398 |
class TestLUClusterSetParams(CmdlibTestCase): |
399 |
UID_POOL = [(10, 1000)] |
400 |
|
401 |
def testUidPool(self): |
402 |
op = opcodes.OpClusterSetParams(uid_pool=self.UID_POOL)
|
403 |
self.ExecOpCode(op)
|
404 |
self.assertEqual(self.UID_POOL, self.cluster.uid_pool) |
405 |
|
406 |
def testAddUids(self): |
407 |
old_pool = [(1, 9)] |
408 |
self.cluster.uid_pool = list(old_pool) |
409 |
op = opcodes.OpClusterSetParams(add_uids=self.UID_POOL)
|
410 |
self.ExecOpCode(op)
|
411 |
self.assertEqual(set(self.UID_POOL + old_pool), |
412 |
set(self.cluster.uid_pool)) |
413 |
|
414 |
def testRemoveUids(self): |
415 |
additional_pool = [(1, 9)] |
416 |
self.cluster.uid_pool = self.UID_POOL + additional_pool |
417 |
op = opcodes.OpClusterSetParams(remove_uids=self.UID_POOL)
|
418 |
self.ExecOpCode(op)
|
419 |
self.assertEqual(additional_pool, self.cluster.uid_pool) |
420 |
|
421 |
def testMasterNetmask(self): |
422 |
op = opcodes.OpClusterSetParams(master_netmask=26)
|
423 |
self.ExecOpCode(op)
|
424 |
self.assertEqual(26, self.cluster.master_netmask) |
425 |
|
426 |
def testInvalidDiskparams(self): |
427 |
for diskparams in [{constants.DT_DISKLESS: {constants.LV_STRIPES: 0}}, |
428 |
{constants.DT_DRBD8: {constants.RBD_POOL: "pool"}},
|
429 |
{constants.DT_DRBD8: {constants.RBD_ACCESS: "bunny"}}]:
|
430 |
self.ResetMocks()
|
431 |
op = opcodes.OpClusterSetParams(diskparams=diskparams) |
432 |
self.ExecOpCodeExpectOpPrereqError(op, "verify diskparams") |
433 |
|
434 |
def testValidDiskparams(self): |
435 |
diskparams = {constants.DT_RBD: {constants.RBD_POOL: "mock_pool",
|
436 |
constants.RBD_ACCESS: "kernelspace"}}
|
437 |
op = opcodes.OpClusterSetParams(diskparams=diskparams) |
438 |
self.ExecOpCode(op)
|
439 |
self.assertEqual(diskparams[constants.DT_RBD],
|
440 |
self.cluster.diskparams[constants.DT_RBD])
|
441 |
|
442 |
def testMinimalDiskparams(self): |
443 |
diskparams = {constants.DT_RBD: {constants.RBD_POOL: "mock_pool"}}
|
444 |
self.cluster.diskparams = {}
|
445 |
op = opcodes.OpClusterSetParams(diskparams=diskparams) |
446 |
self.ExecOpCode(op)
|
447 |
self.assertEqual(diskparams, self.cluster.diskparams) |
448 |
|
449 |
def testUnsetDrbdHelperWithDrbdDisks(self): |
450 |
self.cfg.AddNewInstance(disks=[
|
451 |
self.cfg.CreateDisk(dev_type=constants.DT_DRBD8, create_nodes=True)]) |
452 |
op = opcodes.OpClusterSetParams(drbd_helper="")
|
453 |
self.ExecOpCodeExpectOpPrereqError(op, "Cannot disable drbd helper") |
454 |
|
455 |
def testFileStorageDir(self): |
456 |
op = opcodes.OpClusterSetParams(file_storage_dir="/random/path")
|
457 |
self.ExecOpCode(op)
|
458 |
|
459 |
def testSetFileStorageDirToCurrentValue(self): |
460 |
op = opcodes.OpClusterSetParams( |
461 |
file_storage_dir=self.cluster.file_storage_dir)
|
462 |
self.ExecOpCode(op)
|
463 |
|
464 |
self.mcpu.assertLogContainsRegex("file storage dir already set to value") |
465 |
|
466 |
def testValidDrbdHelper(self): |
467 |
node1 = self.cfg.AddNewNode()
|
468 |
node1.offline = True
|
469 |
self.rpc.call_drbd_helper.return_value = \
|
470 |
self.RpcResultsBuilder() \
|
471 |
.AddSuccessfulNode(self.master, "/bin/true") \ |
472 |
.AddOfflineNode(node1) \ |
473 |
.Build() |
474 |
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
|
475 |
self.ExecOpCode(op)
|
476 |
self.mcpu.assertLogContainsRegex("Not checking drbd helper on offline node") |
477 |
|
478 |
def testDrbdHelperFailingNode(self): |
479 |
self.rpc.call_drbd_helper.return_value = \
|
480 |
self.RpcResultsBuilder() \
|
481 |
.AddFailedNode(self.master) \
|
482 |
.Build() |
483 |
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
|
484 |
self.ExecOpCodeExpectOpPrereqError(op, "Error checking drbd helper") |
485 |
|
486 |
def testInvalidDrbdHelper(self): |
487 |
self.rpc.call_drbd_helper.return_value = \
|
488 |
self.RpcResultsBuilder() \
|
489 |
.AddSuccessfulNode(self.master, "/bin/false") \ |
490 |
.Build() |
491 |
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
|
492 |
self.ExecOpCodeExpectOpPrereqError(op, "drbd helper is /bin/false") |
493 |
|
494 |
def testDrbdHelperWithoutDrbdDiskTemplate(self): |
495 |
drbd_helper = "/bin/random_helper"
|
496 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
497 |
self.rpc.call_drbd_helper.return_value = \
|
498 |
self.RpcResultsBuilder() \
|
499 |
.AddSuccessfulNode(self.master, drbd_helper) \
|
500 |
.Build() |
501 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
502 |
self.ExecOpCode(op)
|
503 |
|
504 |
self.mcpu.assertLogContainsRegex("but did not enable") |
505 |
|
506 |
def testResetDrbdHelperDrbdDisabled(self): |
507 |
drbd_helper = ""
|
508 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
509 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
510 |
self.ExecOpCode(op)
|
511 |
|
512 |
self.assertEqual(None, self.cluster.drbd_usermode_helper) |
513 |
|
514 |
def testResetDrbdHelperDrbdEnabled(self): |
515 |
drbd_helper = ""
|
516 |
self.cluster.enabled_disk_templates = [constants.DT_DRBD8]
|
517 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
518 |
self.ExecOpCodeExpectOpPrereqError(
|
519 |
op, "Cannot disable drbd helper while DRBD is enabled.")
|
520 |
|
521 |
def testEnableDrbdNoHelper(self): |
522 |
self.cluster.enabled_disk_templates = [constants.DT_DISKLESS]
|
523 |
self.cluster.drbd_usermode_helper = None |
524 |
enabled_disk_templates = [constants.DT_DRBD8] |
525 |
op = opcodes.OpClusterSetParams( |
526 |
enabled_disk_templates=enabled_disk_templates) |
527 |
self.ExecOpCodeExpectOpPrereqError(
|
528 |
op, "Cannot enable DRBD without a DRBD usermode helper set")
|
529 |
|
530 |
def testEnableDrbdHelperSet(self): |
531 |
drbd_helper = "/bin/random_helper"
|
532 |
self.rpc.call_drbd_helper.return_value = \
|
533 |
self.RpcResultsBuilder() \
|
534 |
.AddSuccessfulNode(self.master, drbd_helper) \
|
535 |
.Build() |
536 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
537 |
self.cluster.drbd_usermode_helper = drbd_helper
|
538 |
enabled_disk_templates = [constants.DT_DRBD8] |
539 |
op = opcodes.OpClusterSetParams( |
540 |
enabled_disk_templates=enabled_disk_templates, |
541 |
ipolicy={constants.IPOLICY_DTS: enabled_disk_templates}) |
542 |
self.ExecOpCode(op)
|
543 |
|
544 |
self.assertEqual(drbd_helper, self.cluster.drbd_usermode_helper) |
545 |
|
546 |
def testDrbdHelperAlreadySet(self): |
547 |
drbd_helper = "/bin/true"
|
548 |
self.rpc.call_drbd_helper.return_value = \
|
549 |
self.RpcResultsBuilder() \
|
550 |
.AddSuccessfulNode(self.master, "/bin/true") \ |
551 |
.Build() |
552 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
553 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
554 |
self.ExecOpCode(op)
|
555 |
|
556 |
self.assertEqual(drbd_helper, self.cluster.drbd_usermode_helper) |
557 |
self.mcpu.assertLogContainsRegex("DRBD helper already in desired state") |
558 |
|
559 |
def testSetDrbdHelper(self): |
560 |
drbd_helper = "/bin/true"
|
561 |
self.rpc.call_drbd_helper.return_value = \
|
562 |
self.RpcResultsBuilder() \
|
563 |
.AddSuccessfulNode(self.master, "/bin/true") \ |
564 |
.Build() |
565 |
self.cluster.drbd_usermode_helper = "/bin/false" |
566 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DRBD8])
|
567 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
568 |
self.ExecOpCode(op)
|
569 |
|
570 |
self.assertEqual(drbd_helper, self.cluster.drbd_usermode_helper) |
571 |
|
572 |
def testBeparams(self): |
573 |
beparams = {constants.BE_VCPUS: 32}
|
574 |
op = opcodes.OpClusterSetParams(beparams=beparams) |
575 |
self.ExecOpCode(op)
|
576 |
self.assertEqual(32, self.cluster |
577 |
.beparams[constants.PP_DEFAULT][constants.BE_VCPUS]) |
578 |
|
579 |
def testNdparams(self): |
580 |
ndparams = {constants.ND_EXCLUSIVE_STORAGE: True}
|
581 |
op = opcodes.OpClusterSetParams(ndparams=ndparams) |
582 |
self.ExecOpCode(op)
|
583 |
self.assertEqual(True, self.cluster |
584 |
.ndparams[constants.ND_EXCLUSIVE_STORAGE]) |
585 |
|
586 |
def testNdparamsResetOobProgram(self): |
587 |
ndparams = {constants.ND_OOB_PROGRAM: ""}
|
588 |
op = opcodes.OpClusterSetParams(ndparams=ndparams) |
589 |
self.ExecOpCode(op)
|
590 |
self.assertEqual(constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM],
|
591 |
self.cluster.ndparams[constants.ND_OOB_PROGRAM])
|
592 |
|
593 |
def testHvState(self): |
594 |
hv_state = {constants.HT_FAKE: {constants.HVST_CPU_TOTAL: 8}}
|
595 |
op = opcodes.OpClusterSetParams(hv_state=hv_state) |
596 |
self.ExecOpCode(op)
|
597 |
self.assertEqual(8, self.cluster.hv_state_static |
598 |
[constants.HT_FAKE][constants.HVST_CPU_TOTAL]) |
599 |
|
600 |
def testDiskState(self): |
601 |
disk_state = { |
602 |
constants.DT_PLAIN: { |
603 |
"mock_vg": {constants.DS_DISK_TOTAL: 10} |
604 |
} |
605 |
} |
606 |
op = opcodes.OpClusterSetParams(disk_state=disk_state) |
607 |
self.ExecOpCode(op)
|
608 |
self.assertEqual(10, self.cluster |
609 |
.disk_state_static[constants.DT_PLAIN]["mock_vg"]
|
610 |
[constants.DS_DISK_TOTAL]) |
611 |
|
612 |
def testDefaultIPolicy(self): |
613 |
ipolicy = constants.IPOLICY_DEFAULTS |
614 |
op = opcodes.OpClusterSetParams(ipolicy=ipolicy) |
615 |
self.ExecOpCode(op)
|
616 |
|
617 |
def testIPolicyNewViolation(self): |
618 |
import ganeti.constants as C |
619 |
ipolicy = C.IPOLICY_DEFAULTS |
620 |
ipolicy[C.ISPECS_MINMAX][0][C.ISPECS_MIN][C.ISPEC_MEM_SIZE] = 128 |
621 |
ipolicy[C.ISPECS_MINMAX][0][C.ISPECS_MAX][C.ISPEC_MEM_SIZE] = 128 |
622 |
|
623 |
self.cfg.AddNewInstance(beparams={C.BE_MINMEM: 512, C.BE_MAXMEM: 512}) |
624 |
op = opcodes.OpClusterSetParams(ipolicy=ipolicy) |
625 |
self.ExecOpCode(op)
|
626 |
|
627 |
self.mcpu.assertLogContainsRegex("instances violate them") |
628 |
|
629 |
def testNicparamsNoInstance(self): |
630 |
nicparams = { |
631 |
constants.NIC_LINK: "mock_bridge"
|
632 |
} |
633 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
634 |
self.ExecOpCode(op)
|
635 |
|
636 |
self.assertEqual("mock_bridge", |
637 |
self.cluster.nicparams
|
638 |
[constants.PP_DEFAULT][constants.NIC_LINK]) |
639 |
|
640 |
def testNicparamsInvalidConf(self): |
641 |
nicparams = { |
642 |
constants.NIC_MODE: constants.NIC_MODE_BRIDGED, |
643 |
constants.NIC_LINK: ""
|
644 |
} |
645 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
646 |
self.ExecOpCodeExpectException(op, errors.ConfigurationError, "NIC link") |
647 |
|
648 |
def testNicparamsInvalidInstanceConf(self): |
649 |
nicparams = { |
650 |
constants.NIC_MODE: constants.NIC_MODE_BRIDGED, |
651 |
constants.NIC_LINK: "mock_bridge"
|
652 |
} |
653 |
self.cfg.AddNewInstance(nics=[
|
654 |
self.cfg.CreateNic(nicparams={constants.NIC_LINK: None})]) |
655 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
656 |
self.ExecOpCodeExpectOpPrereqError(op, "Missing bridged NIC link") |
657 |
|
658 |
def testNicparamsMissingIp(self): |
659 |
nicparams = { |
660 |
constants.NIC_MODE: constants.NIC_MODE_ROUTED |
661 |
} |
662 |
self.cfg.AddNewInstance()
|
663 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
664 |
self.ExecOpCodeExpectOpPrereqError(op, "routed NIC with no ip address") |
665 |
|
666 |
def testNicparamsWithInstance(self): |
667 |
nicparams = { |
668 |
constants.NIC_LINK: "mock_bridge"
|
669 |
} |
670 |
self.cfg.AddNewInstance()
|
671 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
672 |
self.ExecOpCode(op)
|
673 |
|
674 |
def testDefaultHvparams(self): |
675 |
hvparams = constants.HVC_DEFAULTS |
676 |
op = opcodes.OpClusterSetParams(hvparams=hvparams) |
677 |
self.ExecOpCode(op)
|
678 |
|
679 |
self.assertEqual(hvparams, self.cluster.hvparams) |
680 |
|
681 |
def testMinimalHvparams(self): |
682 |
hvparams = { |
683 |
constants.HT_FAKE: { |
684 |
constants.HV_MIGRATION_MODE: constants.HT_MIGRATION_NONLIVE |
685 |
} |
686 |
} |
687 |
self.cluster.hvparams = {}
|
688 |
op = opcodes.OpClusterSetParams(hvparams=hvparams) |
689 |
self.ExecOpCode(op)
|
690 |
|
691 |
self.assertEqual(hvparams, self.cluster.hvparams) |
692 |
|
693 |
def testOsHvp(self): |
694 |
os_hvp = { |
695 |
"mocked_os": {
|
696 |
constants.HT_FAKE: { |
697 |
constants.HV_MIGRATION_MODE: constants.HT_MIGRATION_NONLIVE |
698 |
} |
699 |
}, |
700 |
"other_os": constants.HVC_DEFAULTS
|
701 |
} |
702 |
op = opcodes.OpClusterSetParams(os_hvp=os_hvp) |
703 |
self.ExecOpCode(op)
|
704 |
|
705 |
self.assertEqual(constants.HT_MIGRATION_NONLIVE,
|
706 |
self.cluster.os_hvp["mocked_os"][constants.HT_FAKE] |
707 |
[constants.HV_MIGRATION_MODE]) |
708 |
self.assertEqual(constants.HVC_DEFAULTS, self.cluster.os_hvp["other_os"]) |
709 |
|
710 |
def testRemoveOsHvp(self): |
711 |
os_hvp = {"mocked_os": {constants.HT_FAKE: None}} |
712 |
op = opcodes.OpClusterSetParams(os_hvp=os_hvp) |
713 |
self.ExecOpCode(op)
|
714 |
|
715 |
assert constants.HT_FAKE not in self.cluster.os_hvp["mocked_os"] |
716 |
|
717 |
def testDefaultOsHvp(self): |
718 |
os_hvp = {"mocked_os": constants.HVC_DEFAULTS.copy()}
|
719 |
self.cluster.os_hvp = {"mocked_os": {}} |
720 |
op = opcodes.OpClusterSetParams(os_hvp=os_hvp) |
721 |
self.ExecOpCode(op)
|
722 |
|
723 |
self.assertEqual(os_hvp, self.cluster.os_hvp) |
724 |
|
725 |
def testOsparams(self): |
726 |
osparams = { |
727 |
"mocked_os": {
|
728 |
"param1": "value1", |
729 |
"param2": None |
730 |
}, |
731 |
"other_os": {
|
732 |
"param1": None |
733 |
} |
734 |
} |
735 |
self.cluster.osparams = {"other_os": {"param1": "value1"}} |
736 |
op = opcodes.OpClusterSetParams(osparams=osparams) |
737 |
self.ExecOpCode(op)
|
738 |
|
739 |
self.assertEqual({"mocked_os": {"param1": "value1"}}, self.cluster.osparams) |
740 |
|
741 |
def testEnabledHypervisors(self): |
742 |
enabled_hypervisors = [constants.HT_XEN_HVM, constants.HT_XEN_PVM] |
743 |
op = opcodes.OpClusterSetParams(enabled_hypervisors=enabled_hypervisors) |
744 |
self.ExecOpCode(op)
|
745 |
|
746 |
self.assertEqual(enabled_hypervisors, self.cluster.enabled_hypervisors) |
747 |
|
748 |
def testEnabledHypervisorsWithoutHypervisorParams(self): |
749 |
enabled_hypervisors = [constants.HT_FAKE] |
750 |
self.cluster.hvparams = {}
|
751 |
op = opcodes.OpClusterSetParams(enabled_hypervisors=enabled_hypervisors) |
752 |
self.ExecOpCode(op)
|
753 |
|
754 |
self.assertEqual(enabled_hypervisors, self.cluster.enabled_hypervisors) |
755 |
self.assertEqual(constants.HVC_DEFAULTS[constants.HT_FAKE],
|
756 |
self.cluster.hvparams[constants.HT_FAKE])
|
757 |
|
758 |
@testutils.patch_object(utils, "FindFile") |
759 |
def testValidDefaultIallocator(self, find_file_mock): |
760 |
find_file_mock.return_value = "/random/path"
|
761 |
default_iallocator = "/random/path"
|
762 |
op = opcodes.OpClusterSetParams(default_iallocator=default_iallocator) |
763 |
self.ExecOpCode(op)
|
764 |
|
765 |
self.assertEqual(default_iallocator, self.cluster.default_iallocator) |
766 |
|
767 |
@testutils.patch_object(utils, "FindFile") |
768 |
def testInvalidDefaultIallocator(self, find_file_mock): |
769 |
find_file_mock.return_value = None
|
770 |
default_iallocator = "/random/path"
|
771 |
op = opcodes.OpClusterSetParams(default_iallocator=default_iallocator) |
772 |
self.ExecOpCodeExpectOpPrereqError(op, "Invalid default iallocator script") |
773 |
|
774 |
def testEnabledDiskTemplates(self): |
775 |
enabled_disk_templates = [constants.DT_DISKLESS, constants.DT_PLAIN] |
776 |
op = opcodes.OpClusterSetParams( |
777 |
enabled_disk_templates=enabled_disk_templates, |
778 |
ipolicy={constants.IPOLICY_DTS: enabled_disk_templates}) |
779 |
self.ExecOpCode(op)
|
780 |
|
781 |
self.assertEqual(enabled_disk_templates,
|
782 |
self.cluster.enabled_disk_templates)
|
783 |
|
784 |
def testEnabledDiskTemplatesWithoutVgName(self): |
785 |
enabled_disk_templates = [constants.DT_PLAIN] |
786 |
self.cluster.volume_group_name = None |
787 |
op = opcodes.OpClusterSetParams( |
788 |
enabled_disk_templates=enabled_disk_templates) |
789 |
self.ExecOpCodeExpectOpPrereqError(op, "specify a volume group") |
790 |
|
791 |
def testDisableDiskTemplateWithExistingInstance(self): |
792 |
enabled_disk_templates = [constants.DT_DISKLESS] |
793 |
self.cfg.AddNewInstance(
|
794 |
disks=[self.cfg.CreateDisk(dev_type=constants.DT_PLAIN)])
|
795 |
op = opcodes.OpClusterSetParams( |
796 |
enabled_disk_templates=enabled_disk_templates, |
797 |
ipolicy={constants.IPOLICY_DTS: enabled_disk_templates}) |
798 |
self.ExecOpCodeExpectOpPrereqError(op, "Cannot disable disk template") |
799 |
|
800 |
def testVgNameNoLvmDiskTemplateEnabled(self): |
801 |
vg_name = "test_vg"
|
802 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
803 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
804 |
self.ExecOpCode(op)
|
805 |
|
806 |
self.assertEqual(vg_name, self.cluster.volume_group_name) |
807 |
self.mcpu.assertLogIsEmpty()
|
808 |
|
809 |
def testUnsetVgNameWithLvmDiskTemplateEnabled(self): |
810 |
vg_name = ""
|
811 |
self.cluster.enabled_disk_templates = [constants.DT_PLAIN]
|
812 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
813 |
self.ExecOpCodeExpectOpPrereqError(op, "Cannot unset volume group") |
814 |
|
815 |
def testUnsetVgNameWithLvmInstance(self): |
816 |
vg_name = ""
|
817 |
self.cfg.AddNewInstance(
|
818 |
disks=[self.cfg.CreateDisk(dev_type=constants.DT_PLAIN)])
|
819 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
820 |
self.ExecOpCodeExpectOpPrereqError(op, "Cannot unset volume group") |
821 |
|
822 |
  def testUnsetVgNameWithNoLvmDiskTemplateEnabled(self):
    """Clearing the volume group is allowed when no LVM template is
    enabled.

    """
    vg_name = ""
    self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
    op = opcodes.OpClusterSetParams(vg_name=vg_name)
    self.ExecOpCode(op)

    # the empty string is normalized to None in the cluster config
    self.assertEqual(None, self.cluster.volume_group_name)
|
830 |
def testVgNameToOldName(self): |
831 |
vg_name = self.cluster.volume_group_name
|
832 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
833 |
self.ExecOpCode(op)
|
834 |
|
835 |
self.mcpu.assertLogContainsRegex("already in desired state") |
836 |
|
837 |
  def testVgNameWithFailingNode(self):
    """A node failing the vg_list RPC produces a warning but the opcode
    still runs.

    """
    vg_name = "test_vg"
    op = opcodes.OpClusterSetParams(vg_name=vg_name)
    # make the master node fail the VG query
    self.rpc.call_vg_list.return_value = \
      self.RpcResultsBuilder() \
        .AddFailedNode(self.master) \
        .Build()
    self.ExecOpCode(op)

    self.mcpu.assertLogContainsRegex("Error while gathering data on node")
|
848 |
  def testVgNameWithValidNode(self):
    """A node reporting a sufficiently large VG lets the opcode succeed.

    """
    vg_name = "test_vg"
    op = opcodes.OpClusterSetParams(vg_name=vg_name)
    # 1 GiB (in KiB) is large enough to pass the size check
    self.rpc.call_vg_list.return_value = \
      self.RpcResultsBuilder() \
        .AddSuccessfulNode(self.master, {vg_name: 1024 * 1024}) \
        .Build()
    self.ExecOpCode(op)
|
857 |
  def testVgNameWithTooSmallNode(self):
    """A node reporting a tiny VG makes the opcode fail its prereqs.

    """
    vg_name = "test_vg"
    op = opcodes.OpClusterSetParams(vg_name=vg_name)
    # 1 KiB is below the minimum accepted VG size
    self.rpc.call_vg_list.return_value = \
      self.RpcResultsBuilder() \
        .AddSuccessfulNode(self.master, {vg_name: 1}) \
        .Build()
    self.ExecOpCodeExpectOpPrereqError(op, "too small")
|
866 |
  def testMiscParameters(self):
    """Simple scalar cluster parameters are stored verbatim in the
    cluster configuration.

    """
    op = opcodes.OpClusterSetParams(candidate_pool_size=123,
                                    maintain_node_health=True,
                                    modify_etc_hosts=True,
                                    prealloc_wipe_disks=True,
                                    reserved_lvs=["/dev/mock_lv"],
                                    use_external_mip_script=True)
    self.ExecOpCode(op)

    self.mcpu.assertLogIsEmpty()
    self.assertEqual(123, self.cluster.candidate_pool_size)
    self.assertEqual(True, self.cluster.maintain_node_health)
    self.assertEqual(True, self.cluster.modify_etc_hosts)
    self.assertEqual(True, self.cluster.prealloc_wipe_disks)
    self.assertEqual(["/dev/mock_lv"], self.cluster.reserved_lvs)
    self.assertEqual(True, self.cluster.use_external_mip_script)
|
883 |
  def testAddHiddenOs(self):
    """Adding hidden OSes merges with the existing list; duplicates are
    reported but not added twice.

    """
    self.cluster.hidden_os = ["hidden1", "hidden2"]
    # "hidden2" is already present, "hidden3" is new
    op = opcodes.OpClusterSetParams(hidden_os=[(constants.DDM_ADD, "hidden2"),
                                               (constants.DDM_ADD, "hidden3")])
    self.ExecOpCode(op)

    self.assertEqual(["hidden1", "hidden2", "hidden3"], self.cluster.hidden_os)
    self.mcpu.assertLogContainsRegex("OS hidden2 already")
|
892 |
  def testRemoveBlacklistedOs(self):
    """Removing blacklisted OSes drops existing entries; removal of an
    unknown entry is reported.

    """
    self.cluster.blacklisted_os = ["blisted1", "blisted2"]
    # "blisted3" was never blacklisted
    op = opcodes.OpClusterSetParams(blacklisted_os=[
      (constants.DDM_REMOVE, "blisted2"),
      (constants.DDM_REMOVE, "blisted3")])
    self.ExecOpCode(op)

    self.assertEqual(["blisted1"], self.cluster.blacklisted_os)
    self.mcpu.assertLogContainsRegex("OS blisted3 not found")
|
902 |
def testMasterNetdev(self): |
903 |
master_netdev = "test_dev"
|
904 |
op = opcodes.OpClusterSetParams(master_netdev=master_netdev) |
905 |
self.ExecOpCode(op)
|
906 |
|
907 |
self.assertEqual(master_netdev, self.cluster.master_netdev) |
908 |
|
909 |
  def testMasterNetdevFailNoForce(self):
    """If deactivating the master IP fails and force is not given, the
    opcode aborts with an exec error.

    """
    master_netdev = "test_dev"
    op = opcodes.OpClusterSetParams(master_netdev=master_netdev)
    self.rpc.call_node_deactivate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)
    self.ExecOpCodeExpectOpExecError(op, "Could not disable the master ip")
|
917 |
  def testMasterNetdevFailForce(self):
    """With force=True, a failure to deactivate the master IP is only
    logged and the opcode proceeds.

    """
    master_netdev = "test_dev"
    op = opcodes.OpClusterSetParams(master_netdev=master_netdev,
                                    force=True)
    self.rpc.call_node_deactivate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)
    self.ExecOpCode(op)

    self.mcpu.assertLogContainsRegex("Could not disable the master ip")
|
928 |
|
929 |
class TestLUClusterVerify(CmdlibTestCase):
  """Tests for OpClusterVerify job submission."""

  def testVerifyAllGroups(self):
    """Without a group name, a verify job is submitted per group."""
    verify_op = opcodes.OpClusterVerify()
    ret = self.ExecOpCode(verify_op)

    self.assertEqual(2, len(ret["jobs"]))

  def testVerifyDefaultGroups(self):
    """Restricting to a single group submits exactly one job."""
    verify_op = opcodes.OpClusterVerify(group_name="default")
    ret = self.ExecOpCode(verify_op)

    self.assertEqual(1, len(ret["jobs"]))
|
942 |
|
943 |
class TestLUClusterVerifyConfig(CmdlibTestCase):
  """Tests for OpClusterVerifyConfig.

  Certificate loading/verification and file access are mocked so the LU
  never touches the real filesystem or OpenSSL.

  """

  def setUp(self):
    super(TestLUClusterVerifyConfig, self).setUp()

    self._load_cert_patcher = testutils \
      .patch_object(OpenSSL.crypto, "load_certificate")
    self._load_cert_mock = self._load_cert_patcher.start()
    self._verify_cert_patcher = testutils \
      .patch_object(utils, "VerifyX509Certificate")
    self._verify_cert_mock = self._verify_cert_patcher.start()
    self._read_file_patcher = testutils.patch_object(utils, "ReadFile")
    self._read_file_mock = self._read_file_patcher.start()
    self._can_read_patcher = testutils.patch_object(utils, "CanRead")
    self._can_read_mock = self._can_read_patcher.start()

    # make all certificate/file checks succeed by default
    self._can_read_mock.return_value = True
    self._read_file_mock.return_value = True
    self._verify_cert_mock.return_value = (None, "")
    self._load_cert_mock.return_value = True

  def tearDown(self):
    super(TestLUClusterVerifyConfig, self).tearDown()

    # stop patchers in reverse order of starting them
    self._can_read_patcher.stop()
    self._read_file_patcher.stop()
    self._verify_cert_patcher.stop()
    self._load_cert_patcher.stop()

  def testSuccessfulRun(self):
    """A consistent configuration verifies successfully."""
    self.cfg.AddNewInstance()
    op = opcodes.OpClusterVerifyConfig()
    result = self.ExecOpCode(op)

    self.assertTrue(result)

  def testDanglingNode(self):
    """A node pointing at a non-existing group fails verification."""
    node = self.cfg.AddNewNode()
    self.cfg.AddNewInstance(primary_node=node)
    node.group = "invalid"
    op = opcodes.OpClusterVerifyConfig()
    result = self.ExecOpCode(op)

    self.mcpu.assertLogContainsRegex(
      "following nodes \(and their instances\) belong to a non existing group")
    self.assertFalse(result)

  def testDanglingInstance(self):
    """An instance with a non-existing primary node fails verification."""
    inst = self.cfg.AddNewInstance()
    inst.primary_node = "invalid"
    op = opcodes.OpClusterVerifyConfig()
    result = self.ExecOpCode(op)

    self.mcpu.assertLogContainsRegex(
      "following instances have a non-existing primary-node")
    self.assertFalse(result)
999 |
|
1000 |
|
1001 |
class TestLUClusterVerifyGroup(CmdlibTestCase):
  """End-to-end invocations of LUClusterVerifyGroup."""

  def testEmptyNodeGroup(self):
    """Verifying an empty group succeeds and is logged as skipped."""
    group = self.cfg.AddNewNodeGroup()
    op = opcodes.OpClusterVerifyGroup(group_name=group.name, verbose=True)

    result = self.ExecOpCode(op)

    self.assertTrue(result)
    self.mcpu.assertLogContainsRegex("Empty node group, skipping verification")

  def testSimpleInvocation(self):
    """Default group with no instances verifies without errors."""
    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)

    self.ExecOpCode(op)

  def testSimpleInvocationWithInstance(self):
    """Default group with a diskless instance verifies without errors."""
    self.cfg.AddNewInstance(disks=[])
    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)

    self.ExecOpCode(op)

  def testGhostNode(self):
    """Verification copes with a DRBD secondary in another group while
    both nodes are offline.

    """
    group = self.cfg.AddNewNodeGroup()
    node = self.cfg.AddNewNode(group=group.uuid, offline=True)
    self.master.offline = True
    self.cfg.AddNewInstance(disk_template=constants.DT_DRBD8,
                            primary_node=self.master,
                            secondary_node=node)

    self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
      RpcResultsBuilder() \
        .AddOfflineNode(self.master) \
        .Build()

    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)

    self.ExecOpCode(op)

  def testValidRpcResult(self):
    """Verification accepts a well-formed node_verify RPC answer."""
    self.cfg.AddNewInstance(disks=[])

    self.rpc.call_node_verify.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.master, {}) \
        .Build()

    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)

    self.ExecOpCode(op)
|
1051 |
|
1052 |
class TestLUClusterVerifyGroupMethods(CmdlibTestCase):
  """Base class for testing individual methods in LUClusterVerifyGroup.

  """
  def setUp(self):
    super(TestLUClusterVerifyGroupMethods, self).setUp()
    self.op = opcodes.OpClusterVerifyGroup(group_name="default")

  def PrepareLU(self, lu):
    # give the LU the minimal state its helper methods expect without
    # running the full CheckPrereq machinery
    lu._exclusive_storage = False
    lu.master_node = self.master_uuid
    lu.group_info = self.group
    # NOTE: patches the *class* attribute so all_node_info is served from
    # the mocked config; this leaks across instances, which is acceptable
    # inside a single test process
    cluster.LUClusterVerifyGroup.all_node_info = \
      property(fget=lambda _: self.cfg.GetAllNodesInfo())
|
1067 |
|
1068 |
class TestLUClusterVerifyGroupVerifyNode(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNode."""

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    # a missing or non-dict RPC payload makes the node verification fail
    self.assertFalse(lu._VerifyNode(self.master, None))
    self.assertFalse(lu._VerifyNode(self.master, ""))

  @withLockedLU
  def testInvalidVersion(self, lu):
    self.assertFalse(lu._VerifyNode(self.master, {"version": None}))
    self.assertFalse(lu._VerifyNode(self.master, {"version": ""}))
    self.assertFalse(lu._VerifyNode(self.master, {
      "version": (constants.PROTOCOL_VERSION - 1, constants.RELEASE_VERSION)
    }))

    self.mcpu.ClearLogMessages()
    # a differing *release* version is only a warning, not a failure
    self.assertTrue(lu._VerifyNode(self.master, {
      "version": (constants.PROTOCOL_VERSION, constants.RELEASE_VERSION + "x")
    }))
    self.mcpu.assertLogContainsRegex("software version mismatch")

  def _GetValidNodeResult(self, additional_fields):
    # minimal payload accepted by _VerifyNode, extended per test
    ret = {
      "version": (constants.PROTOCOL_VERSION, constants.RELEASE_VERSION),
      constants.NV_NODESETUP: []
    }
    ret.update(additional_fields)
    return ret

  @withLockedLU
  def testHypervisor(self, lu):
    lu._VerifyNode(self.master, self._GetValidNodeResult({
      constants.NV_HYPERVISOR: {
        constants.HT_XEN_PVM: None,
        constants.HT_XEN_HVM: "mock error"
      }
    }))
    self.mcpu.assertLogContainsRegex(constants.HT_XEN_HVM)
    self.mcpu.assertLogContainsRegex("mock error")

  @withLockedLU
  def testHvParams(self, lu):
    lu._VerifyNode(self.master, self._GetValidNodeResult({
      constants.NV_HVPARAMS: [("mock item", constants.HT_XEN_HVM, "mock error")]
    }))
    self.mcpu.assertLogContainsRegex(constants.HT_XEN_HVM)
    self.mcpu.assertLogContainsRegex("mock item")
    self.mcpu.assertLogContainsRegex("mock error")

  @withLockedLU
  def testSuccessfulResult(self, lu):
    self.assertTrue(lu._VerifyNode(self.master, self._GetValidNodeResult({})))
    self.mcpu.assertLogIsEmpty()
1120 |
|
1121 |
|
1122 |
class TestLUClusterVerifyGroupVerifyNodeTime(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeTime."""

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    for ndata in [{}, {constants.NV_TIME: "invalid"}]:
      self.mcpu.ClearLogMessages()
      lu._VerifyNodeTime(self.master, ndata, None, None)

      self.mcpu.assertLogContainsRegex("Node returned invalid time")

  @withLockedLU
  def testNodeDiverges(self, lu):
    # node times both below and above the [1000, 1005] window diverge
    for ntime in [(0, 0), (2000, 0)]:
      self.mcpu.ClearLogMessages()
      lu._VerifyNodeTime(self.master, {constants.NV_TIME: ntime}, 1000, 1005)

      self.mcpu.assertLogContainsRegex("Node time diverges")

  @withLockedLU
  def testSuccessfulResult(self, lu):
    lu._VerifyNodeTime(self.master, {constants.NV_TIME: (0, 0)}, 0, 5)
    self.mcpu.assertLogIsEmpty()
1143 |
|
1144 |
|
1145 |
class TestLUClusterVerifyGroupUpdateVerifyNodeLVM(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._UpdateVerifyNodeLVM."""

  def setUp(self):
    super(TestLUClusterVerifyGroupUpdateVerifyNodeLVM, self).setUp()
    # a node result with a consistent VG and PV report
    self.VALID_NRESULT = {
      constants.NV_VGLIST: {"mock_vg": 30000},
      constants.NV_PVLIST: [
        {
          "name": "mock_pv",
          "vg_name": "mock_vg",
          "size": 5000,
          "free": 2500,
          "attributes": [],
          "lv_list": []
        }
      ]
    }

  @withLockedLU
  def testNoVgName(self, lu):
    # without a VG configured, LVM data is not checked at all
    lu._UpdateVerifyNodeLVM(self.master, {}, None, None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testEmptyNodeResult(self, lu):
    lu._UpdateVerifyNodeLVM(self.master, {}, "mock_vg", None)
    self.mcpu.assertLogContainsRegex("unable to check volume groups")
    self.mcpu.assertLogContainsRegex("Can't get PV list from node")

  @withLockedLU
  def testValidNodeResult(self, lu):
    lu._UpdateVerifyNodeLVM(self.master, self.VALID_NRESULT, "mock_vg", None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testValidNodeResultExclusiveStorage(self, lu):
    lu._exclusive_storage = True
    lu._UpdateVerifyNodeLVM(self.master, self.VALID_NRESULT, "mock_vg",
                            cluster.LUClusterVerifyGroup.NodeImage())
    self.mcpu.assertLogIsEmpty()
1185 |
|
1186 |
|
1187 |
class TestLUClusterVerifyGroupVerifyGroupDRBDVersion(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyGroupDRBDVersion."""

  @withLockedLU
  def testEmptyNodeResult(self, lu):
    lu._VerifyGroupDRBDVersion({})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testValidNodeResult(self, lu):
    # a single node can never mismatch itself
    lu._VerifyGroupDRBDVersion(
      RpcResultsBuilder()
        .AddSuccessfulNode(self.master, {
          constants.NV_DRBDVERSION: "8.3.0"
        })
        .Build())
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testDifferentVersions(self, lu):
    # two nodes with different DRBD versions are both reported
    node1 = self.cfg.AddNewNode()
    lu._VerifyGroupDRBDVersion(
      RpcResultsBuilder()
        .AddSuccessfulNode(self.master, {
          constants.NV_DRBDVERSION: "8.3.0"
        })
        .AddSuccessfulNode(node1, {
          constants.NV_DRBDVERSION: "8.4.0"
        })
        .Build())
    self.mcpu.assertLogContainsRegex("DRBD version mismatch: 8.3.0")
    self.mcpu.assertLogContainsRegex("DRBD version mismatch: 8.4.0")
|
1219 |
|
1220 |
class TestLUClusterVerifyGroupVerifyGroupLVM(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyGroupLVM."""

  @withLockedLU
  def testNoVgName(self, lu):
    lu._VerifyGroupLVM(None, None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testNoExclusiveStorage(self, lu):
    # PV consistency is only checked with exclusive storage enabled
    lu._VerifyGroupLVM(None, "mock_vg")
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testNoPvInfo(self, lu):
    lu._exclusive_storage = True
    nimg = cluster.LUClusterVerifyGroup.NodeImage()
    lu._VerifyGroupLVM({self.master.uuid: nimg}, "mock_vg")
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testValidPvInfos(self, lu):
    lu._exclusive_storage = True
    node2 = self.cfg.AddNewNode()
    # overlapping pv_min/pv_max ranges across nodes are consistent
    nimg1 = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master.uuid)
    nimg1.pv_min = 10000
    nimg1.pv_max = 10010
    nimg2 = cluster.LUClusterVerifyGroup.NodeImage(uuid=node2.uuid)
    nimg2.pv_min = 9998
    nimg2.pv_max = 10005
    lu._VerifyGroupLVM({self.master.uuid: nimg1, node2.uuid: nimg2}, "mock_vg")
    self.mcpu.assertLogIsEmpty()
1250 |
|
1251 |
|
1252 |
class TestLUClusterVerifyGroupVerifyNodeBridges(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeBridges."""

  @withLockedLU
  def testNoBridges(self, lu):
    lu._VerifyNodeBridges(None, None, None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testInvalidBridges(self, lu):
    # missing or malformed NV_BRIDGES data is reported as invalid
    for ndata in [{}, {constants.NV_BRIDGES: ""}]:
      self.mcpu.ClearLogMessages()
      lu._VerifyNodeBridges(self.master, ndata, ["mock_bridge"])
      self.mcpu.assertLogContainsRegex("not return valid bridge information")

    # NV_BRIDGES lists the bridges the node reported as missing
    self.mcpu.ClearLogMessages()
    lu._VerifyNodeBridges(self.master, {constants.NV_BRIDGES: ["mock_bridge"]},
                          ["mock_bridge"])
    self.mcpu.assertLogContainsRegex("missing bridge")
|
1271 |
|
1272 |
class TestLUClusterVerifyGroupVerifyNodeUserScripts(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeUserScripts."""

  @withLockedLU
  def testNoUserScripts(self, lu):
    lu._VerifyNodeUserScripts(self.master, {})
    self.mcpu.assertLogContainsRegex("did not return user scripts information")

  @withLockedLU
  def testBrokenUserScripts(self, lu):
    # NV_USERSCRIPTS lists scripts the node found broken
    lu._VerifyNodeUserScripts(self.master,
                              {constants.NV_USERSCRIPTS: ["script"]})
    self.mcpu.assertLogContainsRegex("scripts not present or not executable")
|
1285 |
|
1286 |
class TestLUClusterVerifyGroupVerifyNodeNetwork(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeNetwork."""

  def setUp(self):
    super(TestLUClusterVerifyGroupVerifyNodeNetwork, self).setUp()
    # node result with no connectivity problems; tests mutate copies of
    # individual keys to inject failures
    self.VALID_NRESULT = {
      constants.NV_NODELIST: {},
      constants.NV_NODENETTEST: {},
      constants.NV_MASTERIP: True
    }

  @withLockedLU
  def testEmptyNodeResult(self, lu):
    lu._VerifyNodeNetwork(self.master, {})
    self.mcpu.assertLogContainsRegex(
      "node hasn't returned node ssh connectivity data")
    self.mcpu.assertLogContainsRegex(
      "node hasn't returned node tcp connectivity data")
    self.mcpu.assertLogContainsRegex(
      "node hasn't returned node master IP reachability data")

  @withLockedLU
  def testValidResult(self, lu):
    lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testSshProblem(self, lu):
    self.VALID_NRESULT.update({
      constants.NV_NODELIST: {
        "mock_node": "mock_error"
      }
    })
    lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
    self.mcpu.assertLogContainsRegex("ssh communication with node 'mock_node'")

  @withLockedLU
  def testTcpProblem(self, lu):
    self.VALID_NRESULT.update({
      constants.NV_NODENETTEST: {
        "mock_node": "mock_error"
      }
    })
    lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
    self.mcpu.assertLogContainsRegex("tcp communication with node 'mock_node'")

  @withLockedLU
  def testMasterIpNotReachable(self, lu):
    self.VALID_NRESULT.update({
      constants.NV_MASTERIP: False
    })
    node1 = self.cfg.AddNewNode()
    # failure on the master node itself yields the more specific message
    lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
    self.mcpu.assertLogContainsRegex(
      "the master node cannot reach the master IP")

    self.mcpu.ClearLogMessages()
    lu._VerifyNodeNetwork(node1, self.VALID_NRESULT)
    self.mcpu.assertLogContainsRegex("cannot reach the master IP")
|
1346 |
|
1347 |
class TestLUClusterVerifyGroupVerifyInstance(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyInstance.

  Builds a DRBD, a running plain, and a diskless instance plus matching
  NodeImage objects so individual inconsistencies can be injected.

  """
  def setUp(self):
    super(TestLUClusterVerifyGroupVerifyInstance, self).setUp()

    self.node1 = self.cfg.AddNewNode()
    self.drbd_inst = self.cfg.AddNewInstance(
      disks=[self.cfg.CreateDisk(dev_type=constants.DT_DRBD8,
                                 primary_node=self.master,
                                 secondary_node=self.node1)])
    self.running_inst = self.cfg.AddNewInstance(
      admin_state=constants.ADMINST_UP, disks_active=True)
    self.diskless_inst = self.cfg.AddNewInstance(disks=[])

    # volumes of the master image: all plain disks plus the DRBD
    # instance's underlying LVs (children of the DRBD disk)
    self.master_img = \
      cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    self.master_img.volumes = ["/".join(disk.logical_id)
                               for inst in [self.running_inst,
                                            self.diskless_inst]
                               for disk in inst.disks]
    self.master_img.volumes.extend(
      ["/".join(disk.logical_id) for disk in self.drbd_inst.disks[0].children])
    self.master_img.instances = [self.running_inst.uuid]
    self.node1_img = \
      cluster.LUClusterVerifyGroup.NodeImage(uuid=self.node1.uuid)
    self.node1_img.volumes = \
      ["/".join(disk.logical_id) for disk in self.drbd_inst.disks[0].children]
    self.node_imgs = {
      self.master_uuid: self.master_img,
      self.node1.uuid: self.node1_img
    }
    # healthy disk status for the running instance
    self.diskstatus = {
      self.master_uuid: [
        (True, objects.BlockDevStatus(ldisk_status=constants.LDS_OKAY))
        for _ in self.running_inst.disks
      ]
    }

  @withLockedLU
  def testDisklessInst(self, lu):
    lu._VerifyInstance(self.diskless_inst, self.node_imgs, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testOfflineNode(self, lu):
    self.master_img.offline = True
    lu._VerifyInstance(self.drbd_inst, self.node_imgs, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testRunningOnOfflineNode(self, lu):
    self.master_img.offline = True
    lu._VerifyInstance(self.running_inst, self.node_imgs, {})
    self.mcpu.assertLogContainsRegex(
      "instance is marked as running and lives on offline node")

  @withLockedLU
  def testMissingVolume(self, lu):
    self.master_img.volumes = []
    lu._VerifyInstance(self.running_inst, self.node_imgs, {})
    self.mcpu.assertLogContainsRegex("volume .* missing")

  @withLockedLU
  def testRunningInstanceOnWrongNode(self, lu):
    self.master_img.instances = []
    self.diskless_inst.admin_state = constants.ADMINST_UP
    lu._VerifyInstance(self.running_inst, self.node_imgs, {})
    self.mcpu.assertLogContainsRegex("instance not running on its primary node")

  @withLockedLU
  def testRunningInstanceOnRightNode(self, lu):
    self.master_img.instances = [self.running_inst.uuid]
    lu._VerifyInstance(self.running_inst, self.node_imgs, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testValidDiskStatus(self, lu):
    lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testDegradedDiskStatus(self, lu):
    self.diskstatus[self.master_uuid][0][1].is_degraded = True
    lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex("instance .* is degraded")

  @withLockedLU
  def testNotOkayDiskStatus(self, lu):
    self.diskstatus[self.master_uuid][0][1].ldisk_status = constants.LDS_FAULTY
    lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex("instance .* state is 'faulty'")

  @withLockedLU
  def testExclusiveStorageWithInvalidInstance(self, lu):
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    lu._VerifyInstance(self.drbd_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex(
      "instance has template drbd, which is not supported")

  @withLockedLU
  def testExclusiveStorageWithValidInstance(self, lu):
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    # exclusive storage requires spindles to be set on the disks
    self.running_inst.disks[0].spindles = 1
    lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testDrbdInTwoGroups(self, lu):
    group = self.cfg.AddNewNodeGroup()
    self.node1.group = group.uuid
    lu._VerifyInstance(self.drbd_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex(
      "instance has primary and secondary nodes in different groups")

  @withLockedLU
  def testOfflineSecondary(self, lu):
    self.node1_img.offline = True
    lu._VerifyInstance(self.drbd_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex("instance has offline secondary node\(s\)")
|
1466 |
|
1467 |
class TestLUClusterVerifyGroupVerifyOrphanVolumes(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyOrphanVolumes."""

  @withLockedLU
  def testOrphanedVolume(self, lu):
    master_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    master_img.volumes = ["mock_vg/disk_0", "mock_vg/disk_1", "mock_vg/disk_2"]
    node_imgs = {
      self.master_uuid: master_img
    }
    # only disk_0 is expected; disk_2 is covered by the reserved set, so
    # disk_1 is the lone orphan
    node_vol_should = {
      self.master_uuid: ["mock_vg/disk_0"]
    }

    lu._VerifyOrphanVolumes(node_vol_should, node_imgs,
                            utils.FieldSet("mock_vg/disk_2"))
    self.mcpu.assertLogContainsRegex("volume mock_vg/disk_1 is unknown")
    self.mcpu.assertLogDoesNotContainRegex("volume mock_vg/disk_0 is unknown")
    self.mcpu.assertLogDoesNotContainRegex("volume mock_vg/disk_2 is unknown")
|
1486 |
|
1487 |
class TestLUClusterVerifyGroupVerifyNPlusOneMemory(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNPlusOneMemory."""

  @withLockedLU
  def testN1Failure(self, lu):
    group1 = self.cfg.AddNewNodeGroup()

    node1 = self.cfg.AddNewNode()
    node2 = self.cfg.AddNewNode(group=group1)  # different group: ignored
    node3 = self.cfg.AddNewNode()

    inst1 = self.cfg.AddNewInstance()
    inst2 = self.cfg.AddNewInstance()
    inst3 = self.cfg.AddNewInstance()

    # node1 would receive all three instances on master failover but has
    # no free memory recorded (mfree unset)
    node1_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=node1.uuid)
    node1_img.sbp = {
      self.master_uuid: [inst1.uuid, inst2.uuid, inst3.uuid]
    }

    node2_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=node2.uuid)

    node3_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=node3.uuid)
    node3_img.offline = True  # offline nodes are skipped

    node_imgs = {
      node1.uuid: node1_img,
      node2.uuid: node2_img,
      node3.uuid: node3_img
    }

    lu._VerifyNPlusOneMemory(node_imgs, self.cfg.GetAllInstancesInfo())
    # message spelling matches the production code ("accomodate")
    self.mcpu.assertLogContainsRegex(
      "not enough memory to accomodate instance failovers")

    self.mcpu.ClearLogMessages()
    node1_img.mfree = 1000
    lu._VerifyNPlusOneMemory(node_imgs, self.cfg.GetAllInstancesInfo())
    self.mcpu.assertLogIsEmpty()
1525 |
|
1526 |
|
1527 |
class TestLUClusterVerifyGroupVerifyFiles(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyFiles.

  Sets up six nodes with deliberately inconsistent file checksums and
  checks that every expected complaint (and nothing else) is logged.

  """
  @withLockedLU
  def test(self, lu):
    node1 = self.cfg.AddNewNode(master_candidate=False, offline=False,
                                vm_capable=True)
    node2 = self.cfg.AddNewNode(master_candidate=True, vm_capable=False)
    node3 = self.cfg.AddNewNode(master_candidate=False, offline=False,
                                vm_capable=True)
    node4 = self.cfg.AddNewNode(master_candidate=False, offline=False,
                                vm_capable=True)
    node5 = self.cfg.AddNewNode(master_candidate=False, offline=True)

    nodeinfo = [self.master, node1, node2, node3, node4, node5]
    # files every node must have
    files_all = set([
      pathutils.CLUSTER_DOMAIN_SECRET_FILE,
      pathutils.RAPI_CERT_FILE,
      pathutils.RAPI_USERS_FILE,
      ])
    # files that may exist, but then on all nodes or none
    files_opt = set([
      pathutils.RAPI_USERS_FILE,
      hv_xen.XL_CONFIG_FILE,
      pathutils.VNC_PASSWORD_FILE,
      ])
    # files restricted to master candidates
    files_mc = set([
      pathutils.CLUSTER_CONF_FILE,
      ])
    # files restricted to vm_capable nodes
    files_vm = set([
      hv_xen.XEND_CONFIG_FILE,
      hv_xen.XL_CONFIG_FILE,
      pathutils.VNC_PASSWORD_FILE,
      ])
    nvinfo = RpcResultsBuilder() \
      .AddSuccessfulNode(self.master, {
        constants.NV_FILELIST: {
          pathutils.CLUSTER_CONF_FILE: "82314f897f38b35f9dab2f7c6b1593e0",
          pathutils.RAPI_CERT_FILE: "babbce8f387bc082228e544a2146fee4",
          pathutils.CLUSTER_DOMAIN_SECRET_FILE: "cds-47b5b3f19202936bb4",
          hv_xen.XEND_CONFIG_FILE: "b4a8a824ab3cac3d88839a9adeadf310",
          hv_xen.XL_CONFIG_FILE: "77935cee92afd26d162f9e525e3d49b9"
        }}) \
      .AddSuccessfulNode(node1, {
        constants.NV_FILELIST: {
          pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
          hv_xen.XEND_CONFIG_FILE: "b4a8a824ab3cac3d88839a9adeadf310",
        }
      }) \
      .AddSuccessfulNode(node2, {
        constants.NV_FILELIST: {
          pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
          pathutils.CLUSTER_DOMAIN_SECRET_FILE: "cds-47b5b3f19202936bb4",
        }
      }) \
      .AddSuccessfulNode(node3, {
        constants.NV_FILELIST: {
          pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
          pathutils.CLUSTER_CONF_FILE: "conf-a6d4b13e407867f7a7b4f0f232a8f527",
          pathutils.CLUSTER_DOMAIN_SECRET_FILE: "cds-47b5b3f19202936bb4",
          pathutils.RAPI_USERS_FILE: "rapiusers-ea3271e8d810ef3",
          hv_xen.XL_CONFIG_FILE: "77935cee92afd26d162f9e525e3d49b9"
        }
      }) \
      .AddSuccessfulNode(node4, {}) \
      .AddOfflineNode(node5) \
      .Build()
    assert set(nvinfo.keys()) == set(map(operator.attrgetter("uuid"), nodeinfo))

    lu._VerifyFiles(nodeinfo, self.master_uuid, nvinfo,
                    (files_all, files_opt, files_mc, files_vm))

    expected_msgs = [
      "File %s found with 2 different checksums (variant 1 on"
      " %s, %s, %s; variant 2 on %s)" %
      (pathutils.RAPI_CERT_FILE, node1.name, node2.name, node3.name,
       self.master.name),
      "File %s is missing from node(s) %s" %
      (pathutils.CLUSTER_DOMAIN_SECRET_FILE, node1.name),
      "File %s should not exist on node(s) %s" %
      (pathutils.CLUSTER_CONF_FILE, node3.name),
      "File %s is missing from node(s) %s" %
      (hv_xen.XEND_CONFIG_FILE, node3.name),
      "File %s is missing from node(s) %s" %
      (pathutils.CLUSTER_CONF_FILE, node2.name),
      "File %s found with 2 different checksums (variant 1 on"
      " %s; variant 2 on %s)" %
      (pathutils.CLUSTER_CONF_FILE, self.master.name, node3.name),
      "File %s is optional, but it must exist on all or no nodes (not"
      " found on %s, %s, %s)" %
      (pathutils.RAPI_USERS_FILE, self.master.name, node1.name, node2.name),
      "File %s is optional, but it must exist on all or no nodes (not"
      " found on %s)" % (hv_xen.XL_CONFIG_FILE, node1.name),
      "Node did not return file checksum data",
      ]

    # exact message set: nothing unexpected may be logged
    self.assertEqual(len(self.mcpu.GetLogMessages()), len(expected_msgs))
    for expected_msg in expected_msgs:
      self.mcpu.assertLogContainsInLine(expected_msg)
1623 |
|
1624 |
|
1625 |
class TestLUClusterVerifyGroupVerifyNodeDrbd(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeDrbd.

  Exercises the per-node DRBD verification with a single DRBD8 instance
  whose minors span node1 (primary) and node2 (secondary).

  """
  def setUp(self):
    super(TestLUClusterVerifyGroupVerifyNodeDrbd, self).setUp()

    # One running DRBD instance so ComputeDRBDMap() yields minors to check
    self.node1 = self.cfg.AddNewNode()
    self.node2 = self.cfg.AddNewNode()
    self.inst = self.cfg.AddNewInstance(
      disks=[self.cfg.CreateDisk(dev_type=constants.DT_DRBD8,
                                 primary_node=self.node1,
                                 secondary_node=self.node2)],
      admin_state=constants.ADMINST_UP)

  @withLockedLU
  def testNoDrbdHelper(self, lu):
    """No helper configured (None): helper checks are skipped entirely."""
    lu._VerifyNodeDrbd(self.master, {}, self.cfg.GetAllInstancesInfo(), None,
                       self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testDrbdHelperInvalidNodeResult(self, lu):
    """Each malformed helper reply must produce its specific error message."""
    for ndata, expected in [({}, "no drbd usermode helper returned"),
                            ({constants.NV_DRBDHELPER: (False, "")},
                             "drbd usermode helper check unsuccessful"),
                            ({constants.NV_DRBDHELPER: (True, "/bin/false")},
                             "wrong drbd usermode helper")]:
      self.mcpu.ClearLogMessages()
      lu._VerifyNodeDrbd(self.master, ndata, self.cfg.GetAllInstancesInfo(),
                         "/bin/true", self.cfg.ComputeDRBDMap())
      self.mcpu.assertLogContainsRegex(expected)

  @withLockedLU
  def testNoNodeResult(self, lu):
    """A node returning no minor list reports the minor as not active."""
    lu._VerifyNodeDrbd(self.node1, {}, self.cfg.GetAllInstancesInfo(),
                       None, self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogContainsRegex("drbd minor 1 of .* is not active")

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    """A non-list NV_DRBDLIST value is flagged as an unparsable status."""
    lu._VerifyNodeDrbd(self.node1, {constants.NV_DRBDLIST: ""},
                       self.cfg.GetAllInstancesInfo(), None,
                       self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogContainsRegex("cannot parse drbd status file")

  @withLockedLU
  def testWrongMinorInUse(self, lu):
    """Reporting minor 2 instead of 1 yields both inactive+unallocated errors."""
    lu._VerifyNodeDrbd(self.node1, {constants.NV_DRBDLIST: [2]},
                       self.cfg.GetAllInstancesInfo(), None,
                       self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogContainsRegex("drbd minor 1 of .* is not active")
    self.mcpu.assertLogContainsRegex("unallocated drbd minor 2 is in use")

  @withLockedLU
  def testValidResult(self, lu):
    """The expected minor ([1]) produces no log output."""
    lu._VerifyNodeDrbd(self.node1, {constants.NV_DRBDLIST: [1]},
                       self.cfg.GetAllInstancesInfo(), None,
                       self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogIsEmpty()
|
1682 |
|
1683 |
|
1684 |
class TestLUClusterVerifyGroupVerifyNodeOs(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._UpdateNodeOS and _VerifyNodeOS."""

  @withLockedLU
  def testUpdateNodeOsInvalidNodeResult(self, lu):
    """Every malformed NV_OSLIST shape must be rejected with the same error."""
    for ndata in [{}, {constants.NV_OSLIST: ""}, {constants.NV_OSLIST: [""]},
                  {constants.NV_OSLIST: [["1", "2"]]}]:
      self.mcpu.ClearLogMessages()
      nimage = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
      lu._UpdateNodeOS(self.master, ndata, nimage)
      self.mcpu.assertLogContainsRegex("node hasn't returned valid OS data")

  @withLockedLU
  def testUpdateNodeOsValidNodeResult(self, lu):
    """Well-formed OS entries (name, path, status, msg, variants,
    parameters, API versions) are accepted without log output."""
    ndata = {
      constants.NV_OSLIST: [
        ["mock_OS", "/mocked/path", True, "", ["default"], [],
         [constants.OS_API_V20]],
        ["Another_Mock", "/random", True, "", ["var1", "var2"],
         [{"param1": "val1"}, {"param2": "val2"}], constants.OS_API_VERSIONS]
      ]
    }
    nimage = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    lu._UpdateNodeOS(self.master, ndata, nimage)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testVerifyNodeOs(self, lu):
    """Compares a node's OS list against the reference node and checks that
    every expected discrepancy (and nothing else) is reported."""
    node = self.cfg.AddNewNode()
    nimg_root = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=node.uuid)

    # Reference node: healthy "mock_os", a broken base OS, one OS that only
    # exists here, and "diffing_os" whose metadata differs from the test node
    nimg_root.os_fail = False
    nimg_root.oslist = {
      "mock_os": [("/mocked/path", True, "", set(["default"]), set(),
                   set([constants.OS_API_V20]))],
      "broken_base_os": [("/broken", False, "", set(), set(),
                         set([constants.OS_API_V20]))],
      "only_on_root": [("/random", True, "", set(), set(), set())],
      "diffing_os": [("/pinky", True, "", set(["var1", "var2"]),
                      set([("param1", "val1"), ("param2", "val2")]),
                      set([constants.OS_API_V20]))]
    }
    # Node under test: extra OSes, a broken OS and duplicate entries
    nimg.os_fail = False
    nimg.oslist = {
      "mock_os": [("/mocked/path", True, "", set(["default"]), set(),
                   set([constants.OS_API_V20]))],
      "only_on_test": [("/random", True, "", set(), set(), set())],
      "diffing_os": [("/bunny", True, "", set(["var1", "var3"]),
                      set([("param1", "val1"), ("param3", "val3")]),
                      set([constants.OS_API_V15]))],
      "broken_os": [("/broken", False, "", set(), set(),
                     set([constants.OS_API_V20]))],
      "multi_entries": [
        ("/multi1", True, "", set(), set(), set([constants.OS_API_V20])),
        ("/multi2", True, "", set(), set(), set([constants.OS_API_V20]))]
    }

    lu._VerifyNodeOS(node, nimg, nimg_root)

    expected_msgs = [
      "Extra OS only_on_test not present on reference node",
      "OSes present on reference node .* but missing on this node:" +
        " only_on_root",
      "OS API version for diffing_os differs",
      "OS variants list for diffing_os differs",
      "OS parameters for diffing_os differs",
      "Invalid OS broken_os",
      "Extra OS broken_os not present on reference node",
      "OS 'multi_entries' has multiple entries",
      "Extra OS multi_entries not present on reference node"
    ]

    # Exact count first, so an unexpected extra message cannot hide
    self.assertEqual(len(expected_msgs), len(self.mcpu.GetLogMessages()))
    for expected_msg in expected_msgs:
      self.mcpu.assertLogContainsRegex(expected_msg)
|
1758 |
|
1759 |
|
1760 |
class TestLUClusterVerifyGroupVerifyAcceptedFileStoragePaths(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyAcceptedFileStoragePaths.

  The third argument of the verified method selects master behaviour:
  only the master node is expected to report forbidden storage paths.

  """
  @withLockedLU
  def testNotMaster(self, lu):
    """A non-master node returning nothing is fine."""
    lu._VerifyAcceptedFileStoragePaths(self.master, {}, False)
    self.mcpu.assertLogIsEmpty()

  # Fixed typo in test name: "Retuned" -> "Returned" (unittest discovers
  # methods by the "test" prefix, so the rename is behaviour-neutral)
  @withLockedLU
  def testNotMasterButReturnedValue(self, lu):
    """A non-master node must not report storage paths at all."""
    lu._VerifyAcceptedFileStoragePaths(
      self.master, {constants.NV_ACCEPTED_STORAGE_PATHS: []}, False)
    self.mcpu.assertLogContainsRegex(
      "Node should not have returned forbidden file storage paths")

  @withLockedLU
  def testMasterInvalidNodeResult(self, lu):
    """The master failing to report paths is an error."""
    lu._VerifyAcceptedFileStoragePaths(self.master, {}, True)
    self.mcpu.assertLogContainsRegex(
      "Node did not return forbidden file storage paths")

  @withLockedLU
  def testMasterForbiddenPaths(self, lu):
    """A non-empty forbidden-path list from the master is reported."""
    lu._VerifyAcceptedFileStoragePaths(
      self.master, {constants.NV_ACCEPTED_STORAGE_PATHS: ["/forbidden"]}, True)
    self.mcpu.assertLogContainsRegex("Found forbidden file storage paths")

  @withLockedLU
  def testMasterSuccess(self, lu):
    """Master reporting an empty forbidden-path list is the success case."""
    lu._VerifyAcceptedFileStoragePaths(
      self.master, {constants.NV_ACCEPTED_STORAGE_PATHS: []}, True)
    self.mcpu.assertLogIsEmpty()
|
1791 |
|
1792 |
|
1793 |
class TestLUClusterVerifyGroupVerifyStoragePaths(
    TestLUClusterVerifyGroupMethods):
  """Tests for _VerifyFileStoragePaths and _VerifySharedFileStoragePaths."""

  @withLockedLU
  def testVerifyFileStoragePathsSuccess(self, lu):
    """No NV_FILE_STORAGE_PATH key in the reply means no problem found."""
    lu._VerifyFileStoragePaths(self.master, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testVerifyFileStoragePathsFailure(self, lu):
    """A returned path marks the configured file storage path unusable."""
    lu._VerifyFileStoragePaths(self.master,
                               {constants.NV_FILE_STORAGE_PATH: "/fail/path"})
    self.mcpu.assertLogContainsRegex(
      "The configured file storage path is unusable")

  @withLockedLU
  def testVerifySharedFileStoragePathsSuccess(self, lu):
    """Same success case for the shared-file storage variant."""
    lu._VerifySharedFileStoragePaths(self.master, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testVerifySharedFileStoragePathsFailure(self, lu):
    """Same failure case for the shared-file storage variant."""
    lu._VerifySharedFileStoragePaths(
      self.master, {constants.NV_SHARED_FILE_STORAGE_PATH: "/fail/path"})
    self.mcpu.assertLogContainsRegex(
      "The configured sharedfile storage path is unusable")
|
1818 |
|
1819 |
|
1820 |
class TestLUClusterVerifyGroupVerifyOob(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyOob."""

  @withLockedLU
  def testEmptyResult(self, lu):
    """An empty node result yields no out-of-band errors."""
    lu._VerifyOob(self.master, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testErrorResults(self, lu):
    """Every path reported under NV_OOB_PATHS must appear in the log."""
    bad_paths = ["path1", "path2"]
    lu._VerifyOob(self.master, {constants.NV_OOB_PATHS: bad_paths})
    for bad_path in bad_paths:
      self.mcpu.assertLogContainsRegex(bad_path)
1831 |
|
1832 |
|
1833 |
class TestLUClusterVerifyGroupUpdateNodeVolumes(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._UpdateNodeVolumes."""

  def setUp(self):
    super(TestLUClusterVerifyGroupUpdateNodeVolumes, self).setUp()
    # Fresh node image per test; _UpdateNodeVolumes mutates its lvm_fail flag
    self.nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)

  @withLockedLU
  def testNoVgName(self, lu):
    """Without a VG name nothing is logged, but lvm_fail stays set."""
    lu._UpdateNodeVolumes(self.master, {}, self.nimg, None)
    self.mcpu.assertLogIsEmpty()
    self.assertTrue(self.nimg.lvm_fail)

  @withLockedLU
  def testErrorMessage(self, lu):
    """A string NV_LVLIST value is treated as an LVM error message."""
    lu._UpdateNodeVolumes(self.master, {constants.NV_LVLIST: "mock error"},
                          self.nimg, "mock_vg")
    self.mcpu.assertLogContainsRegex("LVM problem on node: mock error")
    self.assertTrue(self.nimg.lvm_fail)

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    """A non-dict, non-string NV_LVLIST value counts as an RPC failure."""
    lu._UpdateNodeVolumes(self.master, {constants.NV_LVLIST: [1, 2, 3]},
                          self.nimg, "mock_vg")
    self.mcpu.assertLogContainsRegex("rpc call to node failed")
    self.assertTrue(self.nimg.lvm_fail)

  @withLockedLU
  def testValidNodeResult(self, lu):
    """A dict result is valid and clears the lvm_fail flag."""
    lu._UpdateNodeVolumes(self.master, {constants.NV_LVLIST: {}},
                          self.nimg, "mock_vg")
    self.mcpu.assertLogIsEmpty()
    self.assertFalse(self.nimg.lvm_fail)
1865 |
|
1866 |
|
1867 |
class TestLUClusterVerifyGroupUpdateNodeInstances(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._UpdateNodeInstances."""

  def setUp(self):
    super(TestLUClusterVerifyGroupUpdateNodeInstances, self).setUp()
    self.nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    """A reply without NV_INSTANCELIST is reported as an RPC failure."""
    lu._UpdateNodeInstances(self.master, {}, self.nimg)
    self.mcpu.assertLogContainsRegex("rpc call to node failed")

  @withLockedLU
  def testValidNodeResult(self, lu):
    """A list of known instance names is accepted silently."""
    inst = self.cfg.AddNewInstance()
    lu._UpdateNodeInstances(self.master,
                            {constants.NV_INSTANCELIST: [inst.name]},
                            self.nimg)
    self.mcpu.assertLogIsEmpty()
|
1885 |
|
1886 |
|
1887 |
class TestLUClusterVerifyGroupUpdateNodeInfo(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._UpdateNodeInfo (hypervisor + VG info)."""

  def setUp(self):
    super(TestLUClusterVerifyGroupUpdateNodeInfo, self).setUp()
    self.nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    # Minimal well-formed hypervisor reply; some tests extend it in place
    # (safe because setUp rebuilds it before every test)
    self.valid_hvresult = {constants.NV_HVINFO: {"memory_free": 1024}}

  @withLockedLU
  def testInvalidHvNodeResult(self, lu):
    """Missing or non-dict NV_HVINFO counts as an RPC failure."""
    for ndata in [{}, {constants.NV_HVINFO: ""}]:
      self.mcpu.ClearLogMessages()
      lu._UpdateNodeInfo(self.master, ndata, self.nimg, None)
      self.mcpu.assertLogContainsRegex("rpc call to node failed")

  @withLockedLU
  def testInvalidMemoryFreeHvNodeResult(self, lu):
    """A non-numeric memory_free value must flag invalid nodeinfo."""
    lu._UpdateNodeInfo(self.master,
                       {constants.NV_HVINFO: {"memory_free": "abc"}},
                       self.nimg, None)
    self.mcpu.assertLogContainsRegex(
      "node returned invalid nodeinfo, check hypervisor")

  @withLockedLU
  def testValidHvNodeResult(self, lu):
    """With no VG requested, a valid hypervisor reply logs nothing."""
    lu._UpdateNodeInfo(self.master, self.valid_hvresult, self.nimg, None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testInvalidVgNodeResult(self, lu):
    """An empty or non-dict NV_VGLIST means no data for the requested VG."""
    for vgdata in [[], ""]:
      self.mcpu.ClearLogMessages()
      ndata = {constants.NV_VGLIST: vgdata}
      ndata.update(self.valid_hvresult)
      lu._UpdateNodeInfo(self.master, ndata, self.nimg, "mock_vg")
      self.mcpu.assertLogContainsRegex(
        "node didn't return data for the volume group 'mock_vg'")

  @withLockedLU
  def testInvalidDiskFreeVgNodeResult(self, lu):
    """A non-numeric free-space value for the VG is invalid LVM info."""
    self.valid_hvresult.update({
      constants.NV_VGLIST: {"mock_vg": "abc"}
    })
    lu._UpdateNodeInfo(self.master, self.valid_hvresult, self.nimg, "mock_vg")
    self.mcpu.assertLogContainsRegex(
      "node returned invalid LVM info, check LVM status")

  @withLockedLU
  def testValidVgNodeResult(self, lu):
    """A numeric free-space value for the VG passes verification."""
    self.valid_hvresult.update({
      constants.NV_VGLIST: {"mock_vg": 10000}
    })
    lu._UpdateNodeInfo(self.master, self.valid_hvresult, self.nimg, "mock_vg")
    self.mcpu.assertLogIsEmpty()
|
1939 |
|
1940 |
|
1941 |
class TestLUClusterVerifyGroupCollectDiskInfo(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._CollectDiskInfo.

  Builds a three-node layout covering diskless, plain and DRBD instances so
  that node2 carries two disks (one primary plain + one DRBD secondary) and
  node3 one (DRBD primary); node1 has none.

  """
  def setUp(self):
    super(TestLUClusterVerifyGroupCollectDiskInfo, self).setUp()

    self.node1 = self.cfg.AddNewNode()
    self.node2 = self.cfg.AddNewNode()
    self.node3 = self.cfg.AddNewNode()

    self.diskless_inst = \
      self.cfg.AddNewInstance(primary_node=self.node1,
                              disk_template=constants.DT_DISKLESS)
    self.plain_inst = \
      self.cfg.AddNewInstance(primary_node=self.node2,
                              disk_template=constants.DT_PLAIN)
    self.drbd_inst = \
      self.cfg.AddNewInstance(primary_node=self.node3,
                              secondary_node=self.node2,
                              disk_template=constants.DT_DRBD8)

    # Node images mirror the instance placement above (pinst = primary
    # instances, sinst = secondary instances, by UUID)
    self.node1_img = cluster.LUClusterVerifyGroup.NodeImage(
      uuid=self.node1.uuid)
    self.node1_img.pinst = [self.diskless_inst.uuid]
    self.node1_img.sinst = []
    self.node2_img = cluster.LUClusterVerifyGroup.NodeImage(
      uuid=self.node2.uuid)
    self.node2_img.pinst = [self.plain_inst.uuid]
    self.node2_img.sinst = [self.drbd_inst.uuid]
    self.node3_img = cluster.LUClusterVerifyGroup.NodeImage(
      uuid=self.node3.uuid)
    self.node3_img.pinst = [self.drbd_inst.uuid]
    self.node3_img.sinst = []

    self.node_images = {
      self.node1.uuid: self.node1_img,
      self.node2.uuid: self.node2_img,
      self.node3.uuid: self.node3_img
    }

    self.node_uuids = [self.node1.uuid, self.node2.uuid, self.node3.uuid]

  @withLockedLU
  def testSuccessfulRun(self, lu):
    """Mirror status matching the expected per-node disk counts is clean."""
    self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.node2, [(True, ""), (True, "")]) \
        .AddSuccessfulNode(self.node3, [(True, "")]) \
        .Build()

    lu._CollectDiskInfo(self.node_uuids, self.node_images,
                        self.cfg.GetAllInstancesInfo())

    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testOfflineAndFailingNodes(self, lu):
    """Offline/failed nodes are reported while collecting disk info."""
    self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
      RpcResultsBuilder() \
        .AddOfflineNode(self.node2) \
        .AddFailedNode(self.node3) \
        .Build()

    lu._CollectDiskInfo(self.node_uuids, self.node_images,
                        self.cfg.GetAllInstancesInfo())

    self.mcpu.assertLogContainsRegex("while getting disk information")

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    """Malformed status tuples must not raise; errors bypass mcpu logging."""
    self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.node2, [(True,), (False,)]) \
        .AddSuccessfulNode(self.node3, [""]) \
        .Build()

    lu._CollectDiskInfo(self.node_uuids, self.node_images,
                        self.cfg.GetAllInstancesInfo())
    # logging is not performed through mcpu
    self.mcpu.assertLogIsEmpty()
|
2019 |
|
2020 |
|
2021 |
class TestLUClusterVerifyGroupHooksCallBack(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup.HooksCallBack (post-phase hook results)."""

  def setUp(self):
    super(TestLUClusterVerifyGroupHooksCallBack, self).setUp()

    # HooksCallBack requires a feedback function; output is discarded here
    self.feedback_fn = lambda _: None

  def PrepareLU(self, lu):
    super(TestLUClusterVerifyGroupHooksCallBack, self).PrepareLU(lu)

    # The callback only inspects results for nodes in lu.my_node_uuids
    lu.my_node_uuids = list(self.cfg.GetAllNodesInfo().keys())

  @withLockedLU
  def testEmptyGroup(self, lu):
    """An empty node group is a no-op (no results expected)."""
    lu.my_node_uuids = []
    lu.HooksCallBack(constants.HOOKS_PHASE_POST, None, self.feedback_fn, None)

  @withLockedLU
  def testFailedResult(self, lu):
    """An RPC-level failure is logged as a hook communication failure."""
    lu.HooksCallBack(constants.HOOKS_PHASE_POST,
                     RpcResultsBuilder(use_node_names=True)
                       .AddFailedNode(self.master).Build(),
                     self.feedback_fn,
                     None)
    self.mcpu.assertLogContainsRegex("Communication failure in hooks execution")

  @withLockedLU
  def testOfflineNode(self, lu):
    """Offline nodes are silently skipped."""
    lu.HooksCallBack(constants.HOOKS_PHASE_POST,
                     RpcResultsBuilder(use_node_names=True)
                       .AddOfflineNode(self.master).Build(),
                     self.feedback_fn,
                     None)

  @withLockedLU
  def testValidResult(self, lu):
    """A successful hook script result produces no error."""
    lu.HooksCallBack(constants.HOOKS_PHASE_POST,
                     RpcResultsBuilder(use_node_names=True)
                       .AddSuccessfulNode(self.master,
                                          [("mock_script",
                                            constants.HKR_SUCCESS,
                                            "mock output")])
                       .Build(),
                     self.feedback_fn,
                     None)

  @withLockedLU
  def testFailedScriptResult(self, lu):
    """An HKR_FAIL script status is reported with the script name."""
    lu.HooksCallBack(constants.HOOKS_PHASE_POST,
                     RpcResultsBuilder(use_node_names=True)
                       .AddSuccessfulNode(self.master,
                                          [("mock_script",
                                            constants.HKR_FAIL,
                                            "mock output")])
                       .Build(),
                     self.feedback_fn,
                     None)
    self.mcpu.assertLogContainsRegex("Script mock_script failed")
2078 |
|
2079 |
|
2080 |
class TestLUClusterVerifyDisks(CmdlibTestCase):
  """Tests for LUClusterVerifyDisks."""

  def testVerifyDisks(self):
    """Executing the opcode must submit exactly one verification job."""
    verify_op = opcodes.OpClusterVerifyDisks()

    result = self.ExecOpCode(verify_op)
    submitted_jobs = result["jobs"]

    self.assertEqual(1, len(submitted_jobs))
2086 |
|
2087 |
|
2088 |
if __name__ == "__main__":
  # Run all test cases in this module via Ganeti's unittest wrapper
  testutils.GanetiTestProgram()