root / test / py / cmdlib / cluster_unittest.py @ 11414807
History | View | Annotate | Download (73 kB)
1 |
#!/usr/bin/python
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2008, 2011, 2012, 2013 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
|
22 |
"""Tests for LUCluster*
|
23 |
|
24 |
"""
|
25 |
|
26 |
import OpenSSL |
27 |
|
28 |
import unittest |
29 |
import operator |
30 |
import os |
31 |
import tempfile |
32 |
import shutil |
33 |
|
34 |
from ganeti import constants |
35 |
from ganeti import errors |
36 |
from ganeti import netutils |
37 |
from ganeti import objects |
38 |
from ganeti import opcodes |
39 |
from ganeti import utils |
40 |
from ganeti import pathutils |
41 |
from ganeti import query |
42 |
from ganeti.cmdlib import cluster |
43 |
from ganeti.hypervisor import hv_xen |
44 |
|
45 |
from testsupport import * |
46 |
|
47 |
import testutils |
48 |
|
49 |
|
50 |
class TestCertVerification(testutils.GanetiTestCase):
  """Tests for cluster._VerifyCertificate."""

  def setUp(self):
    testutils.GanetiTestCase.setUp(self)
    # Scratch directory used to build paths that are guaranteed not to be
    # valid certificate files.
    self.tmpdir = tempfile.mkdtemp()

  def tearDown(self):
    shutil.rmtree(self.tmpdir)

  def testVerifyCertificate(self):
    # A known-good certificate must verify without raising.
    cluster._VerifyCertificate(testutils.TestDataFilename("cert1.pem"))

    # A non-existing file is reported as an error, not raised.
    missing = os.path.join(self.tmpdir, "does-not-exist")
    (errcode, _) = cluster._VerifyCertificate(missing)
    self.assertEqual(errcode, cluster.LUClusterVerifyConfig.ETYPE_ERROR)

    # A file that exists but is not a certificate is also an error.
    not_a_cert = testutils.TestDataFilename("bdev-net.txt")
    (errcode, _) = cluster._VerifyCertificate(not_a_cert)
    self.assertEqual(errcode, cluster.LUClusterVerifyConfig.ETYPE_ERROR)
|
71 |
|
72 |
|
73 |
class TestClusterVerifySsh(unittest.TestCase):
  """Tests for LUClusterVerifyGroup._SelectSshCheckNodes."""

  def testMultipleGroups(self):
    select_fn = cluster.LUClusterVerifyGroup._SelectSshCheckNodes
    # Nodes of the group under test: node20..node25 online, node26 offline.
    mygroupnodes = [objects.Node(name="node%s" % i, group="my",
                                 offline=(i == 26))
                    for i in range(20, 27)]
    # Other groups: "g1" partially offline, "xyz" fully online, "alloff"
    # fully offline, "aaa" with a single online node.
    others = [
      objects.Node(name="node1", group="g1", offline=True),
      objects.Node(name="node2", group="g1", offline=False),
      objects.Node(name="node3", group="g1", offline=False),
      objects.Node(name="node4", group="g1", offline=True),
      objects.Node(name="node5", group="g1", offline=False),
      objects.Node(name="node10", group="xyz", offline=False),
      objects.Node(name="node11", group="xyz", offline=False),
      objects.Node(name="node40", group="alloff", offline=True),
      objects.Node(name="node41", group="alloff", offline=True),
      objects.Node(name="node50", group="aaa", offline=False),
      ]
    all_nodes = others + mygroupnodes
    assert not utils.FindDuplicates([node.name for node in all_nodes])

    (online, perhost) = select_fn(mygroupnodes, "my", all_nodes)
    self.assertEqual(online, ["node%s" % i for i in range(20, 26)])
    self.assertEqual(set(perhost.keys()), set(online))

    # Each online group member checks one online node per foreign group;
    # offline nodes and the all-offline group are never selected.
    self.assertEqual(perhost, {
      "node20": ["node10", "node2", "node50"],
      "node21": ["node11", "node3", "node50"],
      "node22": ["node10", "node5", "node50"],
      "node23": ["node11", "node2", "node50"],
      "node24": ["node10", "node3", "node50"],
      "node25": ["node11", "node5", "node50"],
      })

  def testSingleGroup(self):
    select_fn = cluster.LUClusterVerifyGroup._SelectSshCheckNodes
    # node1 and node4 are offline, node2 and node3 online.
    nodes = [objects.Node(name="node%s" % i, group="default",
                          offline=(i in (1, 4)))
             for i in range(1, 5)]
    assert not utils.FindDuplicates([node.name for node in nodes])

    (online, perhost) = select_fn(nodes, "default", nodes)
    self.assertEqual(online, ["node2", "node3"])
    self.assertEqual(set(perhost.keys()), set(online))

    # With only one group there are no foreign nodes to check.
    self.assertEqual(perhost, {
      "node2": [],
      "node3": [],
      })
130 |
|
131 |
|
132 |
class TestLUClusterActivateMasterIp(CmdlibTestCase):
  """Tests for LUClusterActivateMasterIp."""

  def testSuccess(self):
    self.rpc.call_node_activate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master)

    self.ExecOpCode(opcodes.OpClusterActivateMasterIp())

    # The LU must hand the master's network parameters to the master node.
    self.rpc.call_node_activate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)

  def testFailure(self):
    self.rpc.call_node_activate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)

    # A failing RPC must surface as an execution error.
    self.ExecOpCodeExpectOpExecError(opcodes.OpClusterActivateMasterIp())
|
153 |
|
154 |
|
155 |
class TestLUClusterDeactivateMasterIp(CmdlibTestCase):
  """Tests for LUClusterDeactivateMasterIp."""

  def testSuccess(self):
    self.rpc.call_node_deactivate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master)

    self.ExecOpCode(opcodes.OpClusterDeactivateMasterIp())

    # The LU must hand the master's network parameters to the master node.
    self.rpc.call_node_deactivate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)

  def testFailure(self):
    self.rpc.call_node_deactivate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)

    # A failing RPC must surface as an execution error.
    self.ExecOpCodeExpectOpExecError(opcodes.OpClusterDeactivateMasterIp())
|
176 |
|
177 |
|
178 |
class TestLUClusterConfigQuery(CmdlibTestCase):
  """Tests for LUClusterConfigQuery."""

  def testInvalidField(self):
    op = opcodes.OpClusterConfigQuery(output_fields=["pinky_bunny"])

    # Unknown fields must be rejected, naming the offending field.
    self.ExecOpCodeExpectOpPrereqError(op, "pinky_bunny")

  def testAllFields(self):
    op = opcodes.OpClusterConfigQuery(output_fields=query.CLUSTER_FIELDS.keys())

    self.rpc.call_get_watcher_pause.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, -1)

    result = self.ExecOpCode(op)

    # Requesting every field hits the watcher-pause RPC exactly once and
    # yields one value per known cluster field.
    self.assertEqual(1, self.rpc.call_get_watcher_pause.call_count)
    self.assertEqual(len(result), len(query.CLUSTER_FIELDS))

  def testEmpytFields(self):  # name kept for compatibility ("Empyt" [sic])
    op = opcodes.OpClusterConfigQuery(output_fields=[])

    self.ExecOpCode(op)

    # With no fields requested, no RPC should be made at all.
    self.assertFalse(self.rpc.call_get_watcher_pause.called)
202 |
|
203 |
|
204 |
class TestLUClusterDestroy(CmdlibTestCase):
  """Tests for LUClusterDestroy."""

  def testExistingNodes(self):
    op = opcodes.OpClusterDestroy()

    self.cfg.AddNewNode()
    self.cfg.AddNewNode()

    # Raw string: the expected-error argument is a regex, and "\(" in a
    # plain string literal is an invalid escape sequence (DeprecationWarning,
    # SyntaxWarning on Python >= 3.12).  The string value is unchanged.
    self.ExecOpCodeExpectOpPrereqError(op, r"still 2 node\(s\)")

  def testExistingInstances(self):
    op = opcodes.OpClusterDestroy()

    self.cfg.AddNewInstance()
    self.cfg.AddNewInstance()

    self.ExecOpCodeExpectOpPrereqError(op, r"still 2 instance\(s\)")

  def testEmptyCluster(self):
    op = opcodes.OpClusterDestroy()

    self.ExecOpCode(op)

    # Destroying an empty cluster runs the post-phase hook on the master.
    self.assertSingleHooksCall([self.master.name],
                               "cluster-destroy",
                               constants.HOOKS_PHASE_POST)
229 |
|
230 |
|
231 |
class TestLUClusterPostInit(CmdlibTestCase):
  """Tests for LUClusterPostInit."""

  def testExecuion(self):  # name kept for compatibility ("Execuion" [sic])
    self.ExecOpCode(opcodes.OpClusterPostInit())

    # The op's only observable effect is the post-phase cluster-init hook
    # on the master node.
    self.assertSingleHooksCall([self.master.name],
                               "cluster-init",
                               constants.HOOKS_PHASE_POST)
240 |
|
241 |
|
242 |
class TestLUClusterQuery(CmdlibTestCase):
  """Tests for LUClusterQuery."""

  def testSimpleInvocation(self):
    self.ExecOpCode(opcodes.OpClusterQuery())

  def testIPv6Cluster(self):
    # The query must also succeed on an IPv6-addressed cluster.
    self.cluster.primary_ip_family = netutils.IP6Address.family

    self.ExecOpCode(opcodes.OpClusterQuery())
|
254 |
|
255 |
|
256 |
class TestLUClusterRedistConf(CmdlibTestCase):
  """Tests for LUClusterRedistConf."""

  def testSimpleInvocation(self):
    self.ExecOpCode(opcodes.OpClusterRedistConf())
|
261 |
|
262 |
|
263 |
class TestLUClusterRename(CmdlibTestCase):
  """Tests for LUClusterRename."""

  NEW_NAME = "new-name.example.com"
  NEW_IP = "203.0.113.1"

  def _MockResolveNewName(self):
    # Make hostname resolution yield the new cluster name/IP pair.
    self.netutils_mod.GetHostname.return_value = \
      HostnameMock(self.NEW_NAME, self.NEW_IP)

  def testNoChanges(self):
    op = opcodes.OpClusterRename(name=self.cfg.GetClusterName())

    # Renaming to the current name must be refused.
    self.ExecOpCodeExpectOpPrereqError(op, "name nor the IP address")

  def testReachableIp(self):
    op = opcodes.OpClusterRename(name=self.NEW_NAME)

    self._MockResolveNewName()
    self.netutils_mod.TcpPing.return_value = True

    # The new IP already answering on the network is a prereq failure.
    self.ExecOpCodeExpectOpPrereqError(op, "is reachable on the network")

  def testValidRename(self):
    op = opcodes.OpClusterRename(name=self.NEW_NAME)

    self._MockResolveNewName()

    self.ExecOpCode(op)

    # A successful rename rewrites known_hosts and bounces the master IP.
    self.assertEqual(1, self.ssh_mod.WriteKnownHostsFile.call_count)
    self.rpc.call_node_deactivate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)
    self.rpc.call_node_activate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)

  def testRenameOfflineMaster(self):
    op = opcodes.OpClusterRename(name=self.NEW_NAME)

    self.master.offline = True
    self._MockResolveNewName()

    # The rename must still go through with an offline master node.
    self.ExecOpCode(op)
|
303 |
|
304 |
|
305 |
class TestLUClusterRepairDiskSizes(CmdlibTestCase):
  """Tests for LUClusterRepairDiskSizes."""

  def testNoInstances(self):
    self.ExecOpCode(opcodes.OpClusterRepairDiskSizes())

  def _SetUpInstanceSingleDisk(self, dev_type=constants.DT_PLAIN):
    # One instance with a single disk of the given template, spanning the
    # master node and a freshly added secondary.
    primary = self.master
    secondary = self.cfg.AddNewNode()
    disk = self.cfg.CreateDisk(dev_type=dev_type,
                               primary_node=primary,
                               secondary_node=secondary)
    instance = self.cfg.AddNewInstance(disks=[disk])
    return (instance, disk)

  def testSingleInstanceOnFailingNode(self):
    (instance, _) = self._SetUpInstanceSingleDisk()
    op = opcodes.OpClusterRepairDiskSizes(instances=[instance.name])

    self.rpc.call_blockdev_getdimensions.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)

    self.ExecOpCode(op)

    # The failure is logged, not raised.
    self.mcpu.assertLogContainsRegex("Failure in blockdev_getdimensions")

  def _ExecOpClusterRepairDiskSizes(self, node_data):
    # Not specifying any instances repairs all of them.
    op = opcodes.OpClusterRepairDiskSizes()

    self.rpc.call_blockdev_getdimensions.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, node_data)

    return self.ExecOpCode(op)

  def testInvalidResultData(self):
    # Malformed per-disk results are logged and skipped, never fatal.
    for bogus in [[], [None], ["invalid"], [("still", "invalid")]]:
      self.ResetMocks()

      self._SetUpInstanceSingleDisk()
      self._ExecOpClusterRepairDiskSizes(bogus)

      self.mcpu.assertLogContainsRegex("ignoring")

  def testCorrectSize(self):
    self._SetUpInstanceSingleDisk()
    changed = self._ExecOpClusterRepairDiskSizes([(1024 ** 3, None)])
    self.mcpu.assertLogIsEmpty()
    self.assertEqual(0, len(changed))

  def testWrongSize(self):
    self._SetUpInstanceSingleDisk()
    changed = self._ExecOpClusterRepairDiskSizes([(512 * 1024 ** 2, None)])
    self.assertEqual(1, len(changed))

  def testCorrectDRBD(self):
    self._SetUpInstanceSingleDisk(dev_type=constants.DT_DRBD8)
    changed = self._ExecOpClusterRepairDiskSizes([(1024 ** 3, None)])
    self.mcpu.assertLogIsEmpty()
    self.assertEqual(0, len(changed))

  def testWrongDRBDChild(self):
    (_, disk) = self._SetUpInstanceSingleDisk(dev_type=constants.DT_DRBD8)
    disk.children[0].size = 512
    changed = self._ExecOpClusterRepairDiskSizes([(1024 ** 3, None)])
    self.assertEqual(1, len(changed))

  def testExclusiveStorageInvalidResultData(self):
    self._SetUpInstanceSingleDisk()
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    # Exclusive storage requires spindle data; None is rejected.
    self._ExecOpClusterRepairDiskSizes([(1024 ** 3, None)])

    self.mcpu.assertLogContainsRegex(
      "did not return valid spindles information")

  def testExclusiveStorageCorrectSpindles(self):
    (_, disk) = self._SetUpInstanceSingleDisk()
    disk.spindles = 1
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    changed = self._ExecOpClusterRepairDiskSizes([(1024 ** 3, 1)])
    self.assertEqual(0, len(changed))

  def testExclusiveStorageWrongSpindles(self):
    # The configured disk has no spindles recorded, so the reported value
    # of 1 constitutes a change.
    self._SetUpInstanceSingleDisk()
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    changed = self._ExecOpClusterRepairDiskSizes([(1024 ** 3, 1)])
    self.assertEqual(1, len(changed))
396 |
|
397 |
|
398 |
class TestLUClusterSetParams(CmdlibTestCase): |
399 |
UID_POOL = [(10, 1000)] |
400 |
|
401 |
def testUidPool(self): |
402 |
op = opcodes.OpClusterSetParams(uid_pool=self.UID_POOL)
|
403 |
self.ExecOpCode(op)
|
404 |
self.assertEqual(self.UID_POOL, self.cluster.uid_pool) |
405 |
|
406 |
def testAddUids(self): |
407 |
old_pool = [(1, 9)] |
408 |
self.cluster.uid_pool = list(old_pool) |
409 |
op = opcodes.OpClusterSetParams(add_uids=self.UID_POOL)
|
410 |
self.ExecOpCode(op)
|
411 |
self.assertEqual(set(self.UID_POOL + old_pool), |
412 |
set(self.cluster.uid_pool)) |
413 |
|
414 |
def testRemoveUids(self): |
415 |
additional_pool = [(1, 9)] |
416 |
self.cluster.uid_pool = self.UID_POOL + additional_pool |
417 |
op = opcodes.OpClusterSetParams(remove_uids=self.UID_POOL)
|
418 |
self.ExecOpCode(op)
|
419 |
self.assertEqual(additional_pool, self.cluster.uid_pool) |
420 |
|
421 |
def testMasterNetmask(self): |
422 |
op = opcodes.OpClusterSetParams(master_netmask=26)
|
423 |
self.ExecOpCode(op)
|
424 |
self.assertEqual(26, self.cluster.master_netmask) |
425 |
|
426 |
def testInvalidDiskparams(self): |
427 |
for diskparams in [{constants.DT_DISKLESS: {constants.LV_STRIPES: 0}}, |
428 |
{constants.DT_DRBD8: {constants.RBD_POOL: "pool"}}]:
|
429 |
self.ResetMocks()
|
430 |
op = opcodes.OpClusterSetParams(diskparams=diskparams) |
431 |
self.ExecOpCodeExpectOpPrereqError(op, "verify diskparams") |
432 |
|
433 |
def testValidDiskparams(self): |
434 |
diskparams = {constants.DT_RBD: {constants.RBD_POOL: "mock_pool"}}
|
435 |
op = opcodes.OpClusterSetParams(diskparams=diskparams) |
436 |
self.ExecOpCode(op)
|
437 |
self.assertEqual(diskparams[constants.DT_RBD],
|
438 |
self.cluster.diskparams[constants.DT_RBD])
|
439 |
|
440 |
def testMinimalDiskparams(self): |
441 |
diskparams = {constants.DT_RBD: {constants.RBD_POOL: "mock_pool"}}
|
442 |
self.cluster.diskparams = {}
|
443 |
op = opcodes.OpClusterSetParams(diskparams=diskparams) |
444 |
self.ExecOpCode(op)
|
445 |
self.assertEqual(diskparams, self.cluster.diskparams) |
446 |
|
447 |
def testUnsetDrbdHelperWithDrbdDisks(self): |
448 |
self.cfg.AddNewInstance(disks=[
|
449 |
self.cfg.CreateDisk(dev_type=constants.DT_DRBD8, create_nodes=True)]) |
450 |
op = opcodes.OpClusterSetParams(drbd_helper="")
|
451 |
self.ExecOpCodeExpectOpPrereqError(op, "Cannot disable drbd helper") |
452 |
|
453 |
def testFileStorageDir(self): |
454 |
op = opcodes.OpClusterSetParams(file_storage_dir="/random/path")
|
455 |
self.ExecOpCode(op)
|
456 |
|
457 |
def testSetFileStorageDirToCurrentValue(self): |
458 |
op = opcodes.OpClusterSetParams( |
459 |
file_storage_dir=self.cluster.file_storage_dir)
|
460 |
self.ExecOpCode(op)
|
461 |
|
462 |
self.mcpu.assertLogContainsRegex("file storage dir already set to value") |
463 |
|
464 |
def testValidDrbdHelper(self): |
465 |
node1 = self.cfg.AddNewNode()
|
466 |
node1.offline = True
|
467 |
self.rpc.call_drbd_helper.return_value = \
|
468 |
self.RpcResultsBuilder() \
|
469 |
.AddSuccessfulNode(self.master, "/bin/true") \ |
470 |
.AddOfflineNode(node1) \ |
471 |
.Build() |
472 |
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
|
473 |
self.ExecOpCode(op)
|
474 |
self.mcpu.assertLogContainsRegex("Not checking drbd helper on offline node") |
475 |
|
476 |
def testDrbdHelperFailingNode(self): |
477 |
self.rpc.call_drbd_helper.return_value = \
|
478 |
self.RpcResultsBuilder() \
|
479 |
.AddFailedNode(self.master) \
|
480 |
.Build() |
481 |
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
|
482 |
self.ExecOpCodeExpectOpPrereqError(op, "Error checking drbd helper") |
483 |
|
484 |
def testInvalidDrbdHelper(self): |
485 |
self.rpc.call_drbd_helper.return_value = \
|
486 |
self.RpcResultsBuilder() \
|
487 |
.AddSuccessfulNode(self.master, "/bin/false") \ |
488 |
.Build() |
489 |
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
|
490 |
self.ExecOpCodeExpectOpPrereqError(op, "drbd helper is /bin/false") |
491 |
|
492 |
def testDrbdHelperWithoutDrbdDiskTemplate(self): |
493 |
drbd_helper = "/bin/random_helper"
|
494 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
495 |
self.rpc.call_drbd_helper.return_value = \
|
496 |
self.RpcResultsBuilder() \
|
497 |
.AddSuccessfulNode(self.master, drbd_helper) \
|
498 |
.Build() |
499 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
500 |
self.ExecOpCode(op)
|
501 |
|
502 |
self.mcpu.assertLogContainsRegex("but did not enable") |
503 |
|
504 |
def testResetDrbdHelperDrbdDisabled(self): |
505 |
drbd_helper = ""
|
506 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
507 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
508 |
self.ExecOpCode(op)
|
509 |
|
510 |
self.assertEqual(None, self.cluster.drbd_usermode_helper) |
511 |
|
512 |
def testResetDrbdHelperDrbdEnabled(self): |
513 |
drbd_helper = ""
|
514 |
self.cluster.enabled_disk_templates = [constants.DT_DRBD8]
|
515 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
516 |
self.ExecOpCodeExpectOpPrereqError(
|
517 |
op, "Cannot disable drbd helper while DRBD is enabled.")
|
518 |
|
519 |
def testEnableDrbdNoHelper(self): |
520 |
self.cluster.enabled_disk_templates = [constants.DT_DISKLESS]
|
521 |
self.cluster.drbd_usermode_helper = None |
522 |
enabled_disk_templates = [constants.DT_DRBD8] |
523 |
op = opcodes.OpClusterSetParams( |
524 |
enabled_disk_templates=enabled_disk_templates) |
525 |
self.ExecOpCodeExpectOpPrereqError(
|
526 |
op, "Cannot enable DRBD without a DRBD usermode helper set")
|
527 |
|
528 |
def testEnableDrbdHelperSet(self): |
529 |
drbd_helper = "/bin/random_helper"
|
530 |
self.rpc.call_drbd_helper.return_value = \
|
531 |
self.RpcResultsBuilder() \
|
532 |
.AddSuccessfulNode(self.master, drbd_helper) \
|
533 |
.Build() |
534 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
535 |
self.cluster.drbd_usermode_helper = drbd_helper
|
536 |
enabled_disk_templates = [constants.DT_DRBD8] |
537 |
op = opcodes.OpClusterSetParams( |
538 |
enabled_disk_templates=enabled_disk_templates, |
539 |
ipolicy={constants.IPOLICY_DTS: enabled_disk_templates}) |
540 |
self.ExecOpCode(op)
|
541 |
|
542 |
self.assertEqual(drbd_helper, self.cluster.drbd_usermode_helper) |
543 |
|
544 |
def testDrbdHelperAlreadySet(self): |
545 |
drbd_helper = "/bin/true"
|
546 |
self.rpc.call_drbd_helper.return_value = \
|
547 |
self.RpcResultsBuilder() \
|
548 |
.AddSuccessfulNode(self.master, "/bin/true") \ |
549 |
.Build() |
550 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
551 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
552 |
self.ExecOpCode(op)
|
553 |
|
554 |
self.assertEqual(drbd_helper, self.cluster.drbd_usermode_helper) |
555 |
self.mcpu.assertLogContainsRegex("DRBD helper already in desired state") |
556 |
|
557 |
def testSetDrbdHelper(self): |
558 |
drbd_helper = "/bin/true"
|
559 |
self.rpc.call_drbd_helper.return_value = \
|
560 |
self.RpcResultsBuilder() \
|
561 |
.AddSuccessfulNode(self.master, "/bin/true") \ |
562 |
.Build() |
563 |
self.cluster.drbd_usermode_helper = "/bin/false" |
564 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DRBD8])
|
565 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
566 |
self.ExecOpCode(op)
|
567 |
|
568 |
self.assertEqual(drbd_helper, self.cluster.drbd_usermode_helper) |
569 |
|
570 |
def testBeparams(self): |
571 |
beparams = {constants.BE_VCPUS: 32}
|
572 |
op = opcodes.OpClusterSetParams(beparams=beparams) |
573 |
self.ExecOpCode(op)
|
574 |
self.assertEqual(32, self.cluster |
575 |
.beparams[constants.PP_DEFAULT][constants.BE_VCPUS]) |
576 |
|
577 |
def testNdparams(self): |
578 |
ndparams = {constants.ND_EXCLUSIVE_STORAGE: True}
|
579 |
op = opcodes.OpClusterSetParams(ndparams=ndparams) |
580 |
self.ExecOpCode(op)
|
581 |
self.assertEqual(True, self.cluster |
582 |
.ndparams[constants.ND_EXCLUSIVE_STORAGE]) |
583 |
|
584 |
def testNdparamsResetOobProgram(self): |
585 |
ndparams = {constants.ND_OOB_PROGRAM: ""}
|
586 |
op = opcodes.OpClusterSetParams(ndparams=ndparams) |
587 |
self.ExecOpCode(op)
|
588 |
self.assertEqual(constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM],
|
589 |
self.cluster.ndparams[constants.ND_OOB_PROGRAM])
|
590 |
|
591 |
def testHvState(self): |
592 |
hv_state = {constants.HT_FAKE: {constants.HVST_CPU_TOTAL: 8}}
|
593 |
op = opcodes.OpClusterSetParams(hv_state=hv_state) |
594 |
self.ExecOpCode(op)
|
595 |
self.assertEqual(8, self.cluster.hv_state_static |
596 |
[constants.HT_FAKE][constants.HVST_CPU_TOTAL]) |
597 |
|
598 |
def testDiskState(self): |
599 |
disk_state = { |
600 |
constants.DT_PLAIN: { |
601 |
"mock_vg": {constants.DS_DISK_TOTAL: 10} |
602 |
} |
603 |
} |
604 |
op = opcodes.OpClusterSetParams(disk_state=disk_state) |
605 |
self.ExecOpCode(op)
|
606 |
self.assertEqual(10, self.cluster |
607 |
.disk_state_static[constants.DT_PLAIN]["mock_vg"]
|
608 |
[constants.DS_DISK_TOTAL]) |
609 |
|
610 |
def testDefaultIPolicy(self): |
611 |
ipolicy = constants.IPOLICY_DEFAULTS |
612 |
op = opcodes.OpClusterSetParams(ipolicy=ipolicy) |
613 |
self.ExecOpCode(op)
|
614 |
|
615 |
def testIPolicyNewViolation(self): |
616 |
import ganeti.constants as C |
617 |
ipolicy = C.IPOLICY_DEFAULTS |
618 |
ipolicy[C.ISPECS_MINMAX][0][C.ISPECS_MIN][C.ISPEC_MEM_SIZE] = 128 |
619 |
ipolicy[C.ISPECS_MINMAX][0][C.ISPECS_MAX][C.ISPEC_MEM_SIZE] = 128 |
620 |
|
621 |
self.cfg.AddNewInstance(beparams={C.BE_MINMEM: 512, C.BE_MAXMEM: 512}) |
622 |
op = opcodes.OpClusterSetParams(ipolicy=ipolicy) |
623 |
self.ExecOpCode(op)
|
624 |
|
625 |
self.mcpu.assertLogContainsRegex("instances violate them") |
626 |
|
627 |
def testNicparamsNoInstance(self): |
628 |
nicparams = { |
629 |
constants.NIC_LINK: "mock_bridge"
|
630 |
} |
631 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
632 |
self.ExecOpCode(op)
|
633 |
|
634 |
self.assertEqual("mock_bridge", |
635 |
self.cluster.nicparams
|
636 |
[constants.PP_DEFAULT][constants.NIC_LINK]) |
637 |
|
638 |
def testNicparamsInvalidConf(self): |
639 |
nicparams = { |
640 |
constants.NIC_MODE: constants.NIC_MODE_BRIDGED, |
641 |
constants.NIC_LINK: ""
|
642 |
} |
643 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
644 |
self.ExecOpCodeExpectException(op, errors.ConfigurationError, "NIC link") |
645 |
|
646 |
def testNicparamsInvalidInstanceConf(self): |
647 |
nicparams = { |
648 |
constants.NIC_MODE: constants.NIC_MODE_BRIDGED, |
649 |
constants.NIC_LINK: "mock_bridge"
|
650 |
} |
651 |
self.cfg.AddNewInstance(nics=[
|
652 |
self.cfg.CreateNic(nicparams={constants.NIC_LINK: None})]) |
653 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
654 |
self.ExecOpCodeExpectOpPrereqError(op, "Missing bridged NIC link") |
655 |
|
656 |
def testNicparamsMissingIp(self): |
657 |
nicparams = { |
658 |
constants.NIC_MODE: constants.NIC_MODE_ROUTED |
659 |
} |
660 |
self.cfg.AddNewInstance()
|
661 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
662 |
self.ExecOpCodeExpectOpPrereqError(op, "routed NIC with no ip address") |
663 |
|
664 |
def testNicparamsWithInstance(self): |
665 |
nicparams = { |
666 |
constants.NIC_LINK: "mock_bridge"
|
667 |
} |
668 |
self.cfg.AddNewInstance()
|
669 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
670 |
self.ExecOpCode(op)
|
671 |
|
672 |
def testDefaultHvparams(self): |
673 |
hvparams = constants.HVC_DEFAULTS |
674 |
op = opcodes.OpClusterSetParams(hvparams=hvparams) |
675 |
self.ExecOpCode(op)
|
676 |
|
677 |
self.assertEqual(hvparams, self.cluster.hvparams) |
678 |
|
679 |
def testMinimalHvparams(self): |
680 |
hvparams = { |
681 |
constants.HT_FAKE: { |
682 |
constants.HV_MIGRATION_MODE: constants.HT_MIGRATION_NONLIVE |
683 |
} |
684 |
} |
685 |
self.cluster.hvparams = {}
|
686 |
op = opcodes.OpClusterSetParams(hvparams=hvparams) |
687 |
self.ExecOpCode(op)
|
688 |
|
689 |
self.assertEqual(hvparams, self.cluster.hvparams) |
690 |
|
691 |
def testOsHvp(self): |
692 |
os_hvp = { |
693 |
"mocked_os": {
|
694 |
constants.HT_FAKE: { |
695 |
constants.HV_MIGRATION_MODE: constants.HT_MIGRATION_NONLIVE |
696 |
} |
697 |
}, |
698 |
"other_os": constants.HVC_DEFAULTS
|
699 |
} |
700 |
op = opcodes.OpClusterSetParams(os_hvp=os_hvp) |
701 |
self.ExecOpCode(op)
|
702 |
|
703 |
self.assertEqual(constants.HT_MIGRATION_NONLIVE,
|
704 |
self.cluster.os_hvp["mocked_os"][constants.HT_FAKE] |
705 |
[constants.HV_MIGRATION_MODE]) |
706 |
self.assertEqual(constants.HVC_DEFAULTS, self.cluster.os_hvp["other_os"]) |
707 |
|
708 |
def testRemoveOsHvp(self): |
709 |
os_hvp = {"mocked_os": {constants.HT_FAKE: None}} |
710 |
op = opcodes.OpClusterSetParams(os_hvp=os_hvp) |
711 |
self.ExecOpCode(op)
|
712 |
|
713 |
assert constants.HT_FAKE not in self.cluster.os_hvp["mocked_os"] |
714 |
|
715 |
def testDefaultOsHvp(self): |
716 |
os_hvp = {"mocked_os": constants.HVC_DEFAULTS.copy()}
|
717 |
self.cluster.os_hvp = {"mocked_os": {}} |
718 |
op = opcodes.OpClusterSetParams(os_hvp=os_hvp) |
719 |
self.ExecOpCode(op)
|
720 |
|
721 |
self.assertEqual(os_hvp, self.cluster.os_hvp) |
722 |
|
723 |
def testOsparams(self): |
724 |
osparams = { |
725 |
"mocked_os": {
|
726 |
"param1": "value1", |
727 |
"param2": None |
728 |
}, |
729 |
"other_os": {
|
730 |
"param1": None |
731 |
} |
732 |
} |
733 |
self.cluster.osparams = {"other_os": {"param1": "value1"}} |
734 |
op = opcodes.OpClusterSetParams(osparams=osparams) |
735 |
self.ExecOpCode(op)
|
736 |
|
737 |
self.assertEqual({"mocked_os": {"param1": "value1"}}, self.cluster.osparams) |
738 |
|
739 |
def testEnabledHypervisors(self): |
740 |
enabled_hypervisors = [constants.HT_XEN_HVM, constants.HT_XEN_PVM] |
741 |
op = opcodes.OpClusterSetParams(enabled_hypervisors=enabled_hypervisors) |
742 |
self.ExecOpCode(op)
|
743 |
|
744 |
self.assertEqual(enabled_hypervisors, self.cluster.enabled_hypervisors) |
745 |
|
746 |
def testEnabledHypervisorsWithoutHypervisorParams(self): |
747 |
enabled_hypervisors = [constants.HT_FAKE] |
748 |
self.cluster.hvparams = {}
|
749 |
op = opcodes.OpClusterSetParams(enabled_hypervisors=enabled_hypervisors) |
750 |
self.ExecOpCode(op)
|
751 |
|
752 |
self.assertEqual(enabled_hypervisors, self.cluster.enabled_hypervisors) |
753 |
self.assertEqual(constants.HVC_DEFAULTS[constants.HT_FAKE],
|
754 |
self.cluster.hvparams[constants.HT_FAKE])
|
755 |
|
756 |
@testutils.patch_object(utils, "FindFile") |
757 |
def testValidDefaultIallocator(self, find_file_mock): |
758 |
find_file_mock.return_value = "/random/path"
|
759 |
default_iallocator = "/random/path"
|
760 |
op = opcodes.OpClusterSetParams(default_iallocator=default_iallocator) |
761 |
self.ExecOpCode(op)
|
762 |
|
763 |
self.assertEqual(default_iallocator, self.cluster.default_iallocator) |
764 |
|
765 |
@testutils.patch_object(utils, "FindFile") |
766 |
def testInvalidDefaultIallocator(self, find_file_mock): |
767 |
find_file_mock.return_value = None
|
768 |
default_iallocator = "/random/path"
|
769 |
op = opcodes.OpClusterSetParams(default_iallocator=default_iallocator) |
770 |
self.ExecOpCodeExpectOpPrereqError(op, "Invalid default iallocator script") |
771 |
|
772 |
def testEnabledDiskTemplates(self): |
773 |
enabled_disk_templates = [constants.DT_DISKLESS, constants.DT_PLAIN] |
774 |
op = opcodes.OpClusterSetParams( |
775 |
enabled_disk_templates=enabled_disk_templates, |
776 |
ipolicy={constants.IPOLICY_DTS: enabled_disk_templates}) |
777 |
self.ExecOpCode(op)
|
778 |
|
779 |
self.assertEqual(enabled_disk_templates,
|
780 |
self.cluster.enabled_disk_templates)
|
781 |
|
782 |
def testEnabledDiskTemplatesWithoutVgName(self): |
783 |
enabled_disk_templates = [constants.DT_PLAIN] |
784 |
self.cluster.volume_group_name = None |
785 |
op = opcodes.OpClusterSetParams( |
786 |
enabled_disk_templates=enabled_disk_templates) |
787 |
self.ExecOpCodeExpectOpPrereqError(op, "specify a volume group") |
788 |
|
789 |
def testDisableDiskTemplateWithExistingInstance(self): |
790 |
enabled_disk_templates = [constants.DT_DISKLESS] |
791 |
self.cfg.AddNewInstance(
|
792 |
disks=[self.cfg.CreateDisk(dev_type=constants.DT_PLAIN)])
|
793 |
op = opcodes.OpClusterSetParams( |
794 |
enabled_disk_templates=enabled_disk_templates, |
795 |
ipolicy={constants.IPOLICY_DTS: enabled_disk_templates}) |
796 |
self.ExecOpCodeExpectOpPrereqError(op, "Cannot disable disk template") |
797 |
|
798 |
def testVgNameNoLvmDiskTemplateEnabled(self): |
799 |
vg_name = "test_vg"
|
800 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
801 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
802 |
self.ExecOpCode(op)
|
803 |
|
804 |
self.assertEqual(vg_name, self.cluster.volume_group_name) |
805 |
self.mcpu.assertLogIsEmpty()
|
806 |
|
807 |
def testUnsetVgNameWithLvmDiskTemplateEnabled(self): |
808 |
vg_name = ""
|
809 |
self.cluster.enabled_disk_templates = [constants.DT_PLAIN]
|
810 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
811 |
self.ExecOpCodeExpectOpPrereqError(op, "Cannot unset volume group") |
812 |
|
813 |
def testUnsetVgNameWithLvmInstance(self): |
814 |
vg_name = ""
|
815 |
self.cfg.AddNewInstance(
|
816 |
disks=[self.cfg.CreateDisk(dev_type=constants.DT_PLAIN)])
|
817 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
818 |
self.ExecOpCodeExpectOpPrereqError(op, "Cannot unset volume group") |
819 |
|
820 |
def testUnsetVgNameWithNoLvmDiskTemplateEnabled(self):
  """The VG name may be unset when no LVM template is enabled."""
  self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
  self.ExecOpCode(opcodes.OpClusterSetParams(vg_name=""))

  self.assertEqual(None, self.cluster.volume_group_name)
def testVgNameToOldName(self):
  """Re-setting the already-configured VG name only logs a notice."""
  current_vg = self.cluster.volume_group_name
  self.ExecOpCode(opcodes.OpClusterSetParams(vg_name=current_vg))

  self.mcpu.assertLogContainsRegex("already in desired state")
def testVgNameWithFailingNode(self):
  """An RPC failure while listing VGs is logged but does not abort."""
  op = opcodes.OpClusterSetParams(vg_name="test_vg")
  self.rpc.call_vg_list.return_value = \
    self.RpcResultsBuilder() \
      .AddFailedNode(self.master) \
      .Build()
  self.ExecOpCode(op)

  self.mcpu.assertLogContainsRegex("Error while gathering data on node")
def testVgNameWithValidNode(self):
  """A VG that is large enough on all nodes is accepted silently."""
  new_vg = "test_vg"
  vg_sizes = {new_vg: 1024 * 1024}
  self.rpc.call_vg_list.return_value = \
    self.RpcResultsBuilder() \
      .AddSuccessfulNode(self.master, vg_sizes) \
      .Build()
  self.ExecOpCode(opcodes.OpClusterSetParams(vg_name=new_vg))
def testVgNameWithTooSmallNode(self):
  """A VG below the minimum size on some node must be rejected."""
  new_vg = "test_vg"
  self.rpc.call_vg_list.return_value = \
    self.RpcResultsBuilder() \
      .AddSuccessfulNode(self.master, {new_vg: 1}) \
      .Build()
  op = opcodes.OpClusterSetParams(vg_name=new_vg)
  self.ExecOpCodeExpectOpPrereqError(op, "too small")
def testMiscParameters(self):
  """Simple scalar cluster parameters are stored verbatim."""
  op = opcodes.OpClusterSetParams(candidate_pool_size=123,
                                  maintain_node_health=True,
                                  modify_etc_hosts=True,
                                  prealloc_wipe_disks=True,
                                  reserved_lvs=["/dev/mock_lv"],
                                  use_external_mip_script=True)
  self.ExecOpCode(op)

  self.mcpu.assertLogIsEmpty()
  cl = self.cluster
  for expected, actual in [(123, cl.candidate_pool_size),
                           (True, cl.maintain_node_health),
                           (True, cl.modify_etc_hosts),
                           (True, cl.prealloc_wipe_disks),
                           (["/dev/mock_lv"], cl.reserved_lvs),
                           (True, cl.use_external_mip_script)]:
    self.assertEqual(expected, actual)
def testAddHiddenOs(self):
  """Adding to hidden_os merges new entries and warns on duplicates."""
  self.cluster.hidden_os = ["hidden1", "hidden2"]
  mods = [(constants.DDM_ADD, "hidden2"),
          (constants.DDM_ADD, "hidden3")]
  self.ExecOpCode(opcodes.OpClusterSetParams(hidden_os=mods))

  self.assertEqual(["hidden1", "hidden2", "hidden3"], self.cluster.hidden_os)
  self.mcpu.assertLogContainsRegex("OS hidden2 already")
def testRemoveBlacklistedOs(self):
  """Removing from blacklisted_os drops entries and warns on unknowns."""
  self.cluster.blacklisted_os = ["blisted1", "blisted2"]
  mods = [(constants.DDM_REMOVE, "blisted2"),
          (constants.DDM_REMOVE, "blisted3")]
  self.ExecOpCode(opcodes.OpClusterSetParams(blacklisted_os=mods))

  self.assertEqual(["blisted1"], self.cluster.blacklisted_os)
  self.mcpu.assertLogContainsRegex("OS blisted3 not found")
def testMasterNetdev(self):
  """The master network device can be changed."""
  netdev = "test_dev"
  self.ExecOpCode(opcodes.OpClusterSetParams(master_netdev=netdev))

  self.assertEqual(netdev, self.cluster.master_netdev)
def testMasterNetdevFailNoForce(self):
  """Without force, a failure to stop the master IP aborts the opcode."""
  op = opcodes.OpClusterSetParams(master_netdev="test_dev")
  self.rpc.call_node_deactivate_master_ip.return_value = \
    self.RpcResultsBuilder() \
      .CreateFailedNodeResult(self.master)
  self.ExecOpCodeExpectOpExecError(op, "Could not disable the master ip")
def testMasterNetdevFailForce(self):
  """With force, a failure to stop the master IP is only logged."""
  op = opcodes.OpClusterSetParams(master_netdev="test_dev",
                                  force=True)
  self.rpc.call_node_deactivate_master_ip.return_value = \
    self.RpcResultsBuilder() \
      .CreateFailedNodeResult(self.master)
  self.ExecOpCode(op)

  self.mcpu.assertLogContainsRegex("Could not disable the master ip")
class TestLUClusterVerify(CmdlibTestCase):
  """Tests for the cluster-wide verify opcode."""

  def testVerifyAllGroups(self):
    """Without a group name, one job per node group is submitted."""
    result = self.ExecOpCode(opcodes.OpClusterVerify())

    self.assertEqual(2, len(result["jobs"]))

  def testVerifyDefaultGroups(self):
    """Naming a group submits exactly one verification job."""
    result = self.ExecOpCode(opcodes.OpClusterVerify(group_name="default"))

    self.assertEqual(1, len(result["jobs"]))
class TestLUClusterVerifyConfig(CmdlibTestCase):
  """Tests for LUClusterVerifyConfig with certificate access mocked out."""

  def setUp(self):
    super(TestLUClusterVerifyConfig, self).setUp()

    # Stub out certificate loading/verification and file access so the
    # LU never touches the real filesystem or OpenSSL.
    self._load_cert_patcher = testutils \
      .patch_object(OpenSSL.crypto, "load_certificate")
    self._load_cert_mock = self._load_cert_patcher.start()
    self._verify_cert_patcher = testutils \
      .patch_object(utils, "VerifyX509Certificate")
    self._verify_cert_mock = self._verify_cert_patcher.start()
    self._read_file_patcher = testutils.patch_object(utils, "ReadFile")
    self._read_file_mock = self._read_file_patcher.start()
    self._can_read_patcher = testutils.patch_object(utils, "CanRead")
    self._can_read_mock = self._can_read_patcher.start()

    # All mocks report success / valid data by default.
    self._can_read_mock.return_value = True
    self._read_file_mock.return_value = True
    self._verify_cert_mock.return_value = (None, "")
    self._load_cert_mock.return_value = True

  def tearDown(self):
    super(TestLUClusterVerifyConfig, self).tearDown()

    # Stop patchers in reverse order of creation.
    self._can_read_patcher.stop()
    self._read_file_patcher.stop()
    self._verify_cert_patcher.stop()
    self._load_cert_patcher.stop()

  def testSuccessfulRun(self):
    """A consistent configuration verifies successfully."""
    self.cfg.AddNewInstance()
    op = opcodes.OpClusterVerifyConfig()
    result = self.ExecOpCode(op)

    self.assertTrue(result)

  def testDanglingNode(self):
    """A node pointing at a non-existing group fails verification."""
    node = self.cfg.AddNewNode()
    self.cfg.AddNewInstance(primary_node=node)
    node.group = "invalid"
    op = opcodes.OpClusterVerifyConfig()
    result = self.ExecOpCode(op)

    # Raw string: "\(" is an invalid escape sequence in a plain string
    # literal (DeprecationWarning since Python 3.6); the regex itself is
    # byte-identical.
    self.mcpu.assertLogContainsRegex(
      r"following nodes \(and their instances\) belong to a non existing group")
    self.assertFalse(result)

  def testDanglingInstance(self):
    """An instance with a non-existing primary node fails verification."""
    inst = self.cfg.AddNewInstance()
    inst.primary_node = "invalid"
    op = opcodes.OpClusterVerifyConfig()
    result = self.ExecOpCode(op)

    self.mcpu.assertLogContainsRegex(
      "following instances have a non-existing primary-node")
    self.assertFalse(result)
class TestLUClusterVerifyGroup(CmdlibTestCase):
  """End-to-end tests for the per-group verify opcode."""

  def testEmptyNodeGroup(self):
    """A node group without nodes is skipped with a notice."""
    group = self.cfg.AddNewNodeGroup()
    op = opcodes.OpClusterVerifyGroup(group_name=group.name, verbose=True)

    result = self.ExecOpCode(op)

    self.assertTrue(result)
    self.mcpu.assertLogContainsRegex("Empty node group, skipping verification")

  def testSimpleInvocation(self):
    """Verification of the default group runs without errors."""
    self.ExecOpCode(
      opcodes.OpClusterVerifyGroup(group_name="default", verbose=True))

  def testSimpleInvocationWithInstance(self):
    """Verification copes with a diskless instance in the group."""
    self.cfg.AddNewInstance(disks=[])
    self.ExecOpCode(
      opcodes.OpClusterVerifyGroup(group_name="default", verbose=True))

  def testGhostNode(self):
    """An offline secondary in another group does not break verification."""
    other_group = self.cfg.AddNewNodeGroup()
    ghost_node = self.cfg.AddNewNode(group=other_group.uuid, offline=True)
    self.master.offline = True
    self.cfg.AddNewInstance(disk_template=constants.DT_DRBD8,
                            primary_node=self.master,
                            secondary_node=ghost_node)

    self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
      RpcResultsBuilder() \
        .AddOfflineNode(self.master) \
        .Build()

    self.ExecOpCode(
      opcodes.OpClusterVerifyGroup(group_name="default", verbose=True))

  def testValidRpcResult(self):
    """A successful node_verify RPC answer is processed cleanly."""
    self.cfg.AddNewInstance(disks=[])

    self.rpc.call_node_verify.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.master, {}) \
        .Build()

    self.ExecOpCode(
      opcodes.OpClusterVerifyGroup(group_name="default", verbose=True))
class TestLUClusterVerifyGroupMethods(CmdlibTestCase):
  """Base class for testing individual methods in LUClusterVerifyGroup.

  """
  def setUp(self):
    super(TestLUClusterVerifyGroupMethods, self).setUp()
    # Default opcode used by withLockedLU to instantiate the LU.
    self.op = opcodes.OpClusterVerifyGroup(group_name="default")

  def PrepareLU(self, lu):
    # Inject the minimal state the individual _Verify* methods expect
    # instead of running the full CheckPrereq machinery.
    lu._exclusive_storage = False
    lu.master_node = self.master_uuid
    lu.group_info = self.group
    # Monkeypatch the class property so all_node_info is served from the
    # mocked configuration.  NOTE(review): this mutates the class, not
    # the instance, and is not undone in tearDown — presumably harmless
    # because every test re-applies it; confirm if tests interleave.
    cluster.LUClusterVerifyGroup.all_node_info = \
      property(fget=lambda _: self.cfg.GetAllNodesInfo())
class TestLUClusterVerifyGroupVerifyNode(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNode."""

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    """Non-dict RPC payloads are rejected."""
    for bad_result in [None, ""]:
      self.assertFalse(lu._VerifyNode(self.master, bad_result))

  @withLockedLU
  def testInvalidVersion(self, lu):
    """Missing or incompatible protocol versions fail the node."""
    self.assertFalse(lu._VerifyNode(self.master, {"version": None}))
    self.assertFalse(lu._VerifyNode(self.master, {"version": ""}))
    self.assertFalse(lu._VerifyNode(self.master, {
      "version": (constants.PROTOCOL_VERSION - 1, constants.RELEASE_VERSION)
    }))

    # A differing release (not protocol) version is only a warning.
    self.mcpu.ClearLogMessages()
    self.assertTrue(lu._VerifyNode(self.master, {
      "version": (constants.PROTOCOL_VERSION, constants.RELEASE_VERSION + "x")
    }))
    self.mcpu.assertLogContainsRegex("software version mismatch")

  def _GetValidNodeResult(self, additional_fields):
    """Return a minimal valid node_verify payload plus extra fields."""
    ret = {
      "version": (constants.PROTOCOL_VERSION, constants.RELEASE_VERSION),
      constants.NV_NODESETUP: []
    }
    ret.update(additional_fields)
    return ret

  @withLockedLU
  def testHypervisor(self, lu):
    """Per-hypervisor error strings are reported."""
    lu._VerifyNode(self.master, self._GetValidNodeResult({
      constants.NV_HYPERVISOR: {
        constants.HT_XEN_PVM: None,
        constants.HT_XEN_HVM: "mock error"
      }
    }))
    self.mcpu.assertLogContainsRegex(constants.HT_XEN_HVM)
    self.mcpu.assertLogContainsRegex("mock error")

  @withLockedLU
  def testHvParams(self, lu):
    """Hypervisor parameter validation failures are reported."""
    lu._VerifyNode(self.master, self._GetValidNodeResult({
      constants.NV_HVPARAMS: [("mock item", constants.HT_XEN_HVM, "mock error")]
    }))
    self.mcpu.assertLogContainsRegex(constants.HT_XEN_HVM)
    self.mcpu.assertLogContainsRegex("mock item")
    self.mcpu.assertLogContainsRegex("mock error")

  @withLockedLU
  def testSuccessfulResult(self, lu):
    """A fully valid payload passes without any log output."""
    self.assertTrue(lu._VerifyNode(self.master, self._GetValidNodeResult({})))
    self.mcpu.assertLogIsEmpty()
class TestLUClusterVerifyGroupVerifyNodeTime(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeTime."""

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    """Missing or malformed NV_TIME data is reported."""
    for bad_ndata in [{}, {constants.NV_TIME: "invalid"}]:
      self.mcpu.ClearLogMessages()
      lu._VerifyNodeTime(self.master, bad_ndata, None, None)

      self.mcpu.assertLogContainsRegex("Node returned invalid time")

  @withLockedLU
  def testNodeDiverges(self, lu):
    """A node clock outside the accepted window is flagged."""
    for diverging_time in [(0, 0), (2000, 0)]:
      self.mcpu.ClearLogMessages()
      lu._VerifyNodeTime(self.master, {constants.NV_TIME: diverging_time},
                         1000, 1005)

      self.mcpu.assertLogContainsRegex("Node time diverges")

  @withLockedLU
  def testSuccessfulResult(self, lu):
    """A node time inside the window produces no messages."""
    lu._VerifyNodeTime(self.master, {constants.NV_TIME: (0, 0)}, 0, 5)
    self.mcpu.assertLogIsEmpty()
class TestLUClusterVerifyGroupUpdateVerifyNodeLVM(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._UpdateVerifyNodeLVM."""

  def setUp(self):
    super(TestLUClusterVerifyGroupUpdateVerifyNodeLVM, self).setUp()
    # A well-formed VG/PV node answer shared by the success-path tests.
    self.VALID_NRESULT = {
      constants.NV_VGLIST: {"mock_vg": 30000},
      constants.NV_PVLIST: [
        {
          "name": "mock_pv",
          "vg_name": "mock_vg",
          "size": 5000,
          "free": 2500,
          "attributes": [],
          "lv_list": []
        }
      ]
    }

  @withLockedLU
  def testNoVgName(self, lu):
    """Without a VG name the check is a no-op."""
    lu._UpdateVerifyNodeLVM(self.master, {}, None, None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testEmptyNodeResult(self, lu):
    """An empty payload triggers both the VG and the PV error message."""
    lu._UpdateVerifyNodeLVM(self.master, {}, "mock_vg", None)
    self.mcpu.assertLogContainsRegex("unable to check volume groups")
    self.mcpu.assertLogContainsRegex("Can't get PV list from node")

  @withLockedLU
  def testValidNodeResult(self, lu):
    """A well-formed VG/PV payload verifies silently."""
    lu._UpdateVerifyNodeLVM(self.master, self.VALID_NRESULT, "mock_vg", None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testValidNodeResultExclusiveStorage(self, lu):
    """The same payload also verifies with exclusive storage enabled."""
    lu._exclusive_storage = True
    lu._UpdateVerifyNodeLVM(self.master, self.VALID_NRESULT, "mock_vg",
                            cluster.LUClusterVerifyGroup.NodeImage())
    self.mcpu.assertLogIsEmpty()
class TestLUClusterVerifyGroupVerifyGroupDRBDVersion(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyGroupDRBDVersion."""

  @withLockedLU
  def testEmptyNodeResult(self, lu):
    """No node data means nothing to compare, hence no messages."""
    lu._VerifyGroupDRBDVersion({})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testValidNodeResult(self, lu):
    """A single node can never produce a version mismatch."""
    node_results = RpcResultsBuilder() \
      .AddSuccessfulNode(self.master,
                         {constants.NV_DRBDVERSION: "8.3.0"}) \
      .Build()
    lu._VerifyGroupDRBDVersion(node_results)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testDifferentVersions(self, lu):
    """Differing DRBD versions across nodes are reported for each."""
    other_node = self.cfg.AddNewNode()
    node_results = RpcResultsBuilder() \
      .AddSuccessfulNode(self.master,
                         {constants.NV_DRBDVERSION: "8.3.0"}) \
      .AddSuccessfulNode(other_node,
                         {constants.NV_DRBDVERSION: "8.4.0"}) \
      .Build()
    lu._VerifyGroupDRBDVersion(node_results)

    self.mcpu.assertLogContainsRegex("DRBD version mismatch: 8.3.0")
    self.mcpu.assertLogContainsRegex("DRBD version mismatch: 8.4.0")
class TestLUClusterVerifyGroupVerifyGroupLVM(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyGroupLVM."""

  @withLockedLU
  def testNoVgName(self, lu):
    """Without a VG name the check is a no-op."""
    lu._VerifyGroupLVM(None, None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testNoExclusiveStorage(self, lu):
    """Without exclusive storage there is nothing to cross-check."""
    lu._VerifyGroupLVM(None, "mock_vg")
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testNoPvInfo(self, lu):
    """Node images without PV data are silently skipped."""
    lu._exclusive_storage = True
    empty_img = cluster.LUClusterVerifyGroup.NodeImage()
    lu._VerifyGroupLVM({self.master.uuid: empty_img}, "mock_vg")
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testValidPvInfos(self, lu):
    """Overlapping PV size ranges across nodes raise no errors."""
    lu._exclusive_storage = True
    node2 = self.cfg.AddNewNode()
    nimg1 = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master.uuid)
    nimg1.pv_min = 10000
    nimg1.pv_max = 10010
    nimg2 = cluster.LUClusterVerifyGroup.NodeImage(uuid=node2.uuid)
    nimg2.pv_min = 9998
    nimg2.pv_max = 10005
    lu._VerifyGroupLVM({self.master.uuid: nimg1, node2.uuid: nimg2}, "mock_vg")
    self.mcpu.assertLogIsEmpty()
class TestLUClusterVerifyGroupVerifyNodeBridges(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeBridges."""

  @withLockedLU
  def testNoBridges(self, lu):
    """With no bridges requested the check is a no-op."""
    lu._VerifyNodeBridges(None, None, None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testInvalidBridges(self, lu):
    """Malformed bridge data and missing bridges are both reported."""
    for bad_ndata in [{}, {constants.NV_BRIDGES: ""}]:
      self.mcpu.ClearLogMessages()
      lu._VerifyNodeBridges(self.master, bad_ndata, ["mock_bridge"])
      self.mcpu.assertLogContainsRegex("not return valid bridge information")

    # A non-empty NV_BRIDGES list names the bridges the node is missing.
    self.mcpu.ClearLogMessages()
    lu._VerifyNodeBridges(self.master, {constants.NV_BRIDGES: ["mock_bridge"]},
                          ["mock_bridge"])
    self.mcpu.assertLogContainsRegex("missing bridge")
class TestLUClusterVerifyGroupVerifyNodeUserScripts(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeUserScripts."""

  @withLockedLU
  def testNoUserScripts(self, lu):
    """A payload without user-script data is reported."""
    lu._VerifyNodeUserScripts(self.master, {})
    self.mcpu.assertLogContainsRegex("did not return user scripts information")

  @withLockedLU
  def testBrokenUserScripts(self, lu):
    """Scripts listed as broken by the node are reported."""
    lu._VerifyNodeUserScripts(self.master,
                              {constants.NV_USERSCRIPTS: ["script"]})
    self.mcpu.assertLogContainsRegex("scripts not present or not executable")
class TestLUClusterVerifyGroupVerifyNodeNetwork(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeNetwork."""

  def setUp(self):
    super(TestLUClusterVerifyGroupVerifyNodeNetwork, self).setUp()
    # Baseline payload with no connectivity problems; individual tests
    # overwrite single keys to inject failures.
    self.VALID_NRESULT = {
      constants.NV_NODELIST: {},
      constants.NV_NODENETTEST: {},
      constants.NV_MASTERIP: True
    }

  @withLockedLU
  def testEmptyNodeResult(self, lu):
    """An empty payload triggers all three connectivity complaints."""
    lu._VerifyNodeNetwork(self.master, {})
    for pattern in ["node hasn't returned node ssh connectivity data",
                    "node hasn't returned node tcp connectivity data",
                    "node hasn't returned node master IP reachability data"]:
      self.mcpu.assertLogContainsRegex(pattern)

  @withLockedLU
  def testValidResult(self, lu):
    """The baseline payload verifies silently."""
    lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testSshProblem(self, lu):
    """Per-node SSH failures are reported with the node name."""
    self.VALID_NRESULT.update({
      constants.NV_NODELIST: {
        "mock_node": "mock_error"
      }
    })
    lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
    self.mcpu.assertLogContainsRegex("ssh communication with node 'mock_node'")

  @withLockedLU
  def testTcpProblem(self, lu):
    """Per-node TCP failures are reported with the node name."""
    self.VALID_NRESULT.update({
      constants.NV_NODENETTEST: {
        "mock_node": "mock_error"
      }
    })
    lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
    self.mcpu.assertLogContainsRegex("tcp communication with node 'mock_node'")

  @withLockedLU
  def testMasterIpNotReachable(self, lu):
    """The master-IP message differs for master and non-master nodes."""
    self.VALID_NRESULT.update({
      constants.NV_MASTERIP: False
    })
    other_node = self.cfg.AddNewNode()
    lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
    self.mcpu.assertLogContainsRegex(
      "the master node cannot reach the master IP")

    self.mcpu.ClearLogMessages()
    lu._VerifyNodeNetwork(other_node, self.VALID_NRESULT)
    self.mcpu.assertLogContainsRegex("cannot reach the master IP")
class TestLUClusterVerifyGroupVerifyInstance(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyInstance.

  setUp builds three instances (DRBD-mirrored, plain running, diskless)
  plus matching NodeImage objects whose volume lists reflect the
  instances' disks.

  """
  def setUp(self):
    super(TestLUClusterVerifyGroupVerifyInstance, self).setUp()

    self.node1 = self.cfg.AddNewNode()
    self.drbd_inst = self.cfg.AddNewInstance(
      disks=[self.cfg.CreateDisk(dev_type=constants.DT_DRBD8,
                                 primary_node=self.master,
                                 secondary_node=self.node1)])
    self.running_inst = self.cfg.AddNewInstance(
      admin_state=constants.ADMINST_UP, disks_active=True)
    self.diskless_inst = self.cfg.AddNewInstance(disks=[])

    self.master_img = \
      cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    # Volumes are "vg/lv" strings derived from each disk's logical_id.
    self.master_img.volumes = ["/".join(disk.logical_id)
                               for inst in [self.running_inst,
                                            self.diskless_inst]
                               for disk in inst.disks]
    self.master_img.volumes.extend(
      ["/".join(disk.logical_id) for disk in self.drbd_inst.disks[0].children])
    self.master_img.instances = [self.running_inst.uuid]
    self.node1_img = \
      cluster.LUClusterVerifyGroup.NodeImage(uuid=self.node1.uuid)
    self.node1_img.volumes = \
      ["/".join(disk.logical_id) for disk in self.drbd_inst.disks[0].children]
    self.node_imgs = {
      self.master_uuid: self.master_img,
      self.node1.uuid: self.node1_img
    }
    # All disks of the running instance report a healthy mirror status.
    self.diskstatus = {
      self.master_uuid: [
        (True, objects.BlockDevStatus(ldisk_status=constants.LDS_OKAY))
        for _ in self.running_inst.disks
      ]
    }

  @withLockedLU
  def testDisklessInst(self, lu):
    """A diskless instance has nothing to verify."""
    lu._VerifyInstance(self.diskless_inst, self.node_imgs, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testOfflineNode(self, lu):
    """A stopped instance on an offline node is not an error."""
    self.master_img.offline = True
    lu._VerifyInstance(self.drbd_inst, self.node_imgs, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testRunningOnOfflineNode(self, lu):
    """A running instance on an offline node is flagged."""
    self.master_img.offline = True
    lu._VerifyInstance(self.running_inst, self.node_imgs, {})
    self.mcpu.assertLogContainsRegex(
      "instance is marked as running and lives on offline node")

  @withLockedLU
  def testMissingVolume(self, lu):
    """Volumes absent from the node image are reported as missing."""
    self.master_img.volumes = []
    lu._VerifyInstance(self.running_inst, self.node_imgs, {})
    self.mcpu.assertLogContainsRegex("volume .* missing")

  @withLockedLU
  def testRunningInstanceOnWrongNode(self, lu):
    """A running instance not listed on its primary node is flagged."""
    self.master_img.instances = []
    self.diskless_inst.admin_state = constants.ADMINST_UP
    lu._VerifyInstance(self.running_inst, self.node_imgs, {})
    self.mcpu.assertLogContainsRegex("instance not running on its primary node")

  @withLockedLU
  def testRunningInstanceOnRightNode(self, lu):
    """A running instance on its primary node verifies silently."""
    self.master_img.instances = [self.running_inst.uuid]
    lu._VerifyInstance(self.running_inst, self.node_imgs, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testValidDiskStatus(self, lu):
    """Healthy disk status produces no messages."""
    lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testDegradedDiskStatus(self, lu):
    """A degraded disk is reported."""
    self.diskstatus[self.master_uuid][0][1].is_degraded = True
    lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex("instance .* is degraded")

  @withLockedLU
  def testNotOkayDiskStatus(self, lu):
    """A faulty local disk state is reported."""
    self.diskstatus[self.master_uuid][0][1].ldisk_status = constants.LDS_FAULTY
    lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex("instance .* state is 'faulty'")

  @withLockedLU
  def testExclusiveStorageWithInvalidInstance(self, lu):
    """DRBD instances are incompatible with exclusive storage."""
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    lu._VerifyInstance(self.drbd_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex(
      "instance has template drbd, which is not supported")

  @withLockedLU
  def testExclusiveStorageWithValidInstance(self, lu):
    """Plain instances with spindles set pass under exclusive storage."""
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    self.running_inst.disks[0].spindles = 1
    lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testDrbdInTwoGroups(self, lu):
    """DRBD nodes split across groups are flagged."""
    group = self.cfg.AddNewNodeGroup()
    self.node1.group = group.uuid
    lu._VerifyInstance(self.drbd_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex(
      "instance has primary and secondary nodes in different groups")

  @withLockedLU
  def testOfflineSecondary(self, lu):
    """An offline DRBD secondary is flagged."""
    self.node1_img.offline = True
    lu._VerifyInstance(self.drbd_inst, self.node_imgs, self.diskstatus)
    # Raw string: "\(" is an invalid escape sequence in a plain string
    # literal (DeprecationWarning since Python 3.6); the regex itself is
    # byte-identical.
    self.mcpu.assertLogContainsRegex(
      r"instance has offline secondary node\(s\)")
class TestLUClusterVerifyGroupVerifyOrphanVolumes(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyOrphanVolumes."""

  @withLockedLU
  def testOrphanedVolume(self, lu):
    """Only volumes that are neither expected nor reserved are flagged."""
    master_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    # disk_0 is expected, disk_2 is reserved, disk_1 is truly orphaned.
    master_img.volumes = ["mock_vg/disk_0", "mock_vg/disk_1", "mock_vg/disk_2"]
    node_imgs = {
      self.master_uuid: master_img
    }
    node_vol_should = {
      self.master_uuid: ["mock_vg/disk_0"]
    }

    lu._VerifyOrphanVolumes(node_vol_should, node_imgs,
                            utils.FieldSet("mock_vg/disk_2"))

    self.mcpu.assertLogContainsRegex("volume mock_vg/disk_1 is unknown")
    self.mcpu.assertLogDoesNotContainRegex("volume mock_vg/disk_0 is unknown")
    self.mcpu.assertLogDoesNotContainRegex("volume mock_vg/disk_2 is unknown")
class TestLUClusterVerifyGroupVerifyNPlusOneMemory(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNPlusOneMemory."""

  @withLockedLU
  def testN1Failure(self, lu):
    """Failovers that do not fit into free memory are flagged, while
    offline nodes and nodes from other groups are ignored.

    """
    group1 = self.cfg.AddNewNodeGroup()

    node1 = self.cfg.AddNewNode()
    node2 = self.cfg.AddNewNode(group=group1)
    node3 = self.cfg.AddNewNode()

    inst1 = self.cfg.AddNewInstance()
    inst2 = self.cfg.AddNewInstance()
    inst3 = self.cfg.AddNewInstance()

    # node1 would receive all three instances from the master but has
    # no free memory set yet.
    node1_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=node1.uuid)
    node1_img.sbp = {
      self.master_uuid: [inst1.uuid, inst2.uuid, inst3.uuid]
    }

    node2_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=node2.uuid)

    node3_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=node3.uuid)
    node3_img.offline = True

    node_imgs = {
      node1.uuid: node1_img,
      node2.uuid: node2_img,
      node3.uuid: node3_img
    }

    lu._VerifyNPlusOneMemory(node_imgs, self.cfg.GetAllInstancesInfo())
    # NOTE: "accomodate" mirrors the (misspelled) message emitted by the
    # LU; do not "fix" the spelling here without changing the LU too.
    self.mcpu.assertLogContainsRegex(
      "not enough memory to accomodate instance failovers")

    # With enough free memory the same layout verifies cleanly.
    self.mcpu.ClearLogMessages()
    node1_img.mfree = 1000
    lu._VerifyNPlusOneMemory(node_imgs, self.cfg.GetAllInstancesInfo())
    self.mcpu.assertLogIsEmpty()
class TestLUClusterVerifyGroupVerifyFiles(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyFiles.

  Builds six nodes with different roles (master candidate, vm-capable,
  offline) and a per-node file-checksum map containing deliberate
  mismatches, then checks the exact set of error messages produced.

  """
  @withLockedLU
  def test(self, lu):
    node1 = self.cfg.AddNewNode(master_candidate=False, offline=False,
                                vm_capable=True)
    node2 = self.cfg.AddNewNode(master_candidate=True, vm_capable=False)
    node3 = self.cfg.AddNewNode(master_candidate=False, offline=False,
                                vm_capable=True)
    node4 = self.cfg.AddNewNode(master_candidate=False, offline=False,
                                vm_capable=True)
    node5 = self.cfg.AddNewNode(master_candidate=False, offline=True)

    nodeinfo = [self.master, node1, node2, node3, node4, node5]
    # Files that must exist everywhere.
    files_all = set([
      pathutils.CLUSTER_DOMAIN_SECRET_FILE,
      pathutils.RAPI_CERT_FILE,
      pathutils.RAPI_USERS_FILE,
      ])
    # Files that may exist, but then on all or no nodes.
    files_opt = set([
      pathutils.RAPI_USERS_FILE,
      hv_xen.XL_CONFIG_FILE,
      pathutils.VNC_PASSWORD_FILE,
      ])
    # Files restricted to master candidates.
    files_mc = set([
      pathutils.CLUSTER_CONF_FILE,
      ])
    # Files restricted to vm-capable nodes.
    files_vm = set([
      hv_xen.XEND_CONFIG_FILE,
      hv_xen.XL_CONFIG_FILE,
      pathutils.VNC_PASSWORD_FILE,
      ])
    # Per-node checksum answers; node4 returns no data, node5 is offline.
    nvinfo = RpcResultsBuilder() \
      .AddSuccessfulNode(self.master, {
        constants.NV_FILELIST: {
          pathutils.CLUSTER_CONF_FILE: "82314f897f38b35f9dab2f7c6b1593e0",
          pathutils.RAPI_CERT_FILE: "babbce8f387bc082228e544a2146fee4",
          pathutils.CLUSTER_DOMAIN_SECRET_FILE: "cds-47b5b3f19202936bb4",
          hv_xen.XEND_CONFIG_FILE: "b4a8a824ab3cac3d88839a9adeadf310",
          hv_xen.XL_CONFIG_FILE: "77935cee92afd26d162f9e525e3d49b9"
        }}) \
      .AddSuccessfulNode(node1, {
        constants.NV_FILELIST: {
          pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
          hv_xen.XEND_CONFIG_FILE: "b4a8a824ab3cac3d88839a9adeadf310",
        }
      }) \
      .AddSuccessfulNode(node2, {
        constants.NV_FILELIST: {
          pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
          pathutils.CLUSTER_DOMAIN_SECRET_FILE: "cds-47b5b3f19202936bb4",
        }
      }) \
      .AddSuccessfulNode(node3, {
        constants.NV_FILELIST: {
          pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
          pathutils.CLUSTER_CONF_FILE: "conf-a6d4b13e407867f7a7b4f0f232a8f527",
          pathutils.CLUSTER_DOMAIN_SECRET_FILE: "cds-47b5b3f19202936bb4",
          pathutils.RAPI_USERS_FILE: "rapiusers-ea3271e8d810ef3",
          hv_xen.XL_CONFIG_FILE: "77935cee92afd26d162f9e525e3d49b9"
        }
      }) \
      .AddSuccessfulNode(node4, {}) \
      .AddOfflineNode(node5) \
      .Build()
    assert set(nvinfo.keys()) == set(map(operator.attrgetter("uuid"), nodeinfo))

    lu._VerifyFiles(nodeinfo, self.master_uuid, nvinfo,
                    (files_all, files_opt, files_mc, files_vm))

    # One entry per deliberate inconsistency introduced above.
    expected_msgs = [
      "File %s found with 2 different checksums (variant 1 on"
      " %s, %s, %s; variant 2 on %s)" %
      (pathutils.RAPI_CERT_FILE, node1.name, node2.name, node3.name,
       self.master.name),
      "File %s is missing from node(s) %s" %
      (pathutils.CLUSTER_DOMAIN_SECRET_FILE, node1.name),
      "File %s should not exist on node(s) %s" %
      (pathutils.CLUSTER_CONF_FILE, node3.name),
      "File %s is missing from node(s) %s" %
      (hv_xen.XEND_CONFIG_FILE, node3.name),
      "File %s is missing from node(s) %s" %
      (pathutils.CLUSTER_CONF_FILE, node2.name),
      "File %s found with 2 different checksums (variant 1 on"
      " %s; variant 2 on %s)" %
      (pathutils.CLUSTER_CONF_FILE, self.master.name, node3.name),
      "File %s is optional, but it must exist on all or no nodes (not"
      " found on %s, %s, %s)" %
      (pathutils.RAPI_USERS_FILE, self.master.name, node1.name, node2.name),
      "File %s is optional, but it must exist on all or no nodes (not"
      " found on %s)" % (hv_xen.XL_CONFIG_FILE, node1.name),
      "Node did not return file checksum data",
      ]

    # Exactly these messages and no others must be logged.
    self.assertEqual(len(self.mcpu.GetLogMessages()), len(expected_msgs))
    for expected_msg in expected_msgs:
      self.mcpu.assertLogContainsInLine(expected_msg)
class TestLUClusterVerifyGroupVerifyNodeDrbd(TestLUClusterVerifyGroupMethods):
  """Tests for L{cluster.LUClusterVerifyGroup._VerifyNodeDrbd}."""
  def setUp(self):
    super(TestLUClusterVerifyGroupVerifyNodeDrbd, self).setUp()

    self.node1 = self.cfg.AddNewNode()
    self.node2 = self.cfg.AddNewNode()
    # One running DRBD8 instance, so exactly one DRBD minor is expected
    # to be active on its nodes
    self.inst = self.cfg.AddNewInstance(
      disks=[self.cfg.CreateDisk(dev_type=constants.DT_DRBD8,
                                 primary_node=self.node1,
                                 secondary_node=self.node2)],
      admin_state=constants.ADMINST_UP)

  @withLockedLU
  def testNoDrbdHelper(self, lu):
    # No helper configured: helper checks are skipped entirely
    lu._VerifyNodeDrbd(self.master, {}, self.cfg.GetAllInstancesInfo(), None,
                       self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testDrbdHelperInvalidNodeResult(self, lu):
    # (node data, expected error message) pairs for broken helper results
    cases = [
      ({}, "no drbd usermode helper returned"),
      ({constants.NV_DRBDHELPER: (False, "")},
       "drbd usermode helper check unsuccessful"),
      ({constants.NV_DRBDHELPER: (True, "/bin/false")},
       "wrong drbd usermode helper"),
      ]
    for ndata, expected in cases:
      self.mcpu.ClearLogMessages()
      lu._VerifyNodeDrbd(self.master, ndata, self.cfg.GetAllInstancesInfo(),
                         "/bin/true", self.cfg.ComputeDRBDMap())
      self.mcpu.assertLogContainsRegex(expected)

  @withLockedLU
  def testNoNodeResult(self, lu):
    lu._VerifyNodeDrbd(self.node1, {}, self.cfg.GetAllInstancesInfo(),
                       None, self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogContainsRegex("drbd minor 1 of .* is not active")

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    lu._VerifyNodeDrbd(self.node1, {constants.NV_DRBDLIST: ""},
                       self.cfg.GetAllInstancesInfo(), None,
                       self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogContainsRegex("cannot parse drbd status file")

  @withLockedLU
  def testWrongMinorInUse(self, lu):
    # Minor 2 is reported active although only minor 1 is allocated
    lu._VerifyNodeDrbd(self.node1, {constants.NV_DRBDLIST: [2]},
                       self.cfg.GetAllInstancesInfo(), None,
                       self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogContainsRegex("drbd minor 1 of .* is not active")
    self.mcpu.assertLogContainsRegex("unallocated drbd minor 2 is in use")

  @withLockedLU
  def testValidResult(self, lu):
    lu._VerifyNodeDrbd(self.node1, {constants.NV_DRBDLIST: [1]},
                       self.cfg.GetAllInstancesInfo(), None,
                       self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogIsEmpty()
|
1680 |
|
1681 |
|
1682 |
class TestLUClusterVerifyGroupVerifyNodeOs(TestLUClusterVerifyGroupMethods):
  """Tests for OS-related node verification in LUClusterVerifyGroup."""
  @withLockedLU
  def testUpdateNodeOsInvalidNodeResult(self, lu):
    # Several malformed OS list replies, all expected to be rejected
    for ndata in [{}, {constants.NV_OSLIST: ""}, {constants.NV_OSLIST: [""]},
                  {constants.NV_OSLIST: [["1", "2"]]}]:
      self.mcpu.ClearLogMessages()
      nimage = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
      lu._UpdateNodeOS(self.master, ndata, nimage)
      self.mcpu.assertLogContainsRegex("node hasn't returned valid OS data")

  @withLockedLU
  def testUpdateNodeOsValidNodeResult(self, lu):
    # Two well-formed OS entries: name, path, status, diagnose message,
    # variants, parameters, API versions
    ndata = {
      constants.NV_OSLIST: [
        ["mock_OS", "/mocked/path", True, "", ["default"], [],
         [constants.OS_API_V20]],
        ["Another_Mock", "/random", True, "", ["var1", "var2"],
         [{"param1": "val1"}, {"param2": "val2"}], constants.OS_API_VERSIONS]
      ]
    }
    nimage = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    lu._UpdateNodeOS(self.master, ndata, nimage)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testVerifyNodeOs(self, lu):
    # Compare a test node's OS list against the reference (root) node and
    # check every expected discrepancy is reported exactly once
    node = self.cfg.AddNewNode()
    nimg_root = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=node.uuid)

    nimg_root.os_fail = False
    nimg_root.oslist = {
      "mock_os": [("/mocked/path", True, "", set(["default"]), set(),
                   set([constants.OS_API_V20]))],
      "broken_base_os": [("/broken", False, "", set(), set(),
                          set([constants.OS_API_V20]))],
      "only_on_root": [("/random", True, "", set(), set(), set())],
      "diffing_os": [("/pinky", True, "", set(["var1", "var2"]),
                      set([("param1", "val1"), ("param2", "val2")]),
                      set([constants.OS_API_V20]))]
    }
    nimg.os_fail = False
    nimg.oslist = {
      "mock_os": [("/mocked/path", True, "", set(["default"]), set(),
                   set([constants.OS_API_V20]))],
      "only_on_test": [("/random", True, "", set(), set(), set())],
      "diffing_os": [("/bunny", True, "", set(["var1", "var3"]),
                      set([("param1", "val1"), ("param3", "val3")]),
                      set([constants.OS_API_V15]))],
      "broken_os": [("/broken", False, "", set(), set(),
                     set([constants.OS_API_V20]))],
      "multi_entries": [
        ("/multi1", True, "", set(), set(), set([constants.OS_API_V20])),
        ("/multi2", True, "", set(), set(), set([constants.OS_API_V20]))]
    }

    lu._VerifyNodeOS(node, nimg, nimg_root)

    expected_msgs = [
      "Extra OS only_on_test not present on reference node",
      "OSes present on reference node .* but missing on this node:"
        " only_on_root",
      "OS API version for diffing_os differs",
      "OS variants list for diffing_os differs",
      "OS parameters for diffing_os differs",
      "Invalid OS broken_os",
      "Extra OS broken_os not present on reference node",
      "OS 'multi_entries' has multiple entries",
      "Extra OS multi_entries not present on reference node"
      ]

    self.assertEqual(len(expected_msgs), len(self.mcpu.GetLogMessages()))
    for expected_msg in expected_msgs:
      self.mcpu.assertLogContainsRegex(expected_msg)
|
1756 |
|
1757 |
|
1758 |
class TestLUClusterVerifyGroupVerifyAcceptedFileStoragePaths(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyAcceptedFileStoragePaths.

  Only the master node is expected to report (and be checked for)
  forbidden file storage paths.
  """
  @withLockedLU
  def testNotMaster(self, lu):
    # Non-master node returning nothing: no error expected
    lu._VerifyAcceptedFileStoragePaths(self.master, {}, False)
    self.mcpu.assertLogIsEmpty()

  # FIX: renamed from misspelled "testNotMasterButRetunedValue"; still
  # discovered by unittest as it keeps the "test" prefix
  @withLockedLU
  def testNotMasterButReturnedValue(self, lu):
    # Non-master node must not return this field at all
    lu._VerifyAcceptedFileStoragePaths(
      self.master, {constants.NV_ACCEPTED_STORAGE_PATHS: []}, False)
    self.mcpu.assertLogContainsRegex(
      "Node should not have returned forbidden file storage paths")

  @withLockedLU
  def testMasterInvalidNodeResult(self, lu):
    # Master node is required to return the field
    lu._VerifyAcceptedFileStoragePaths(self.master, {}, True)
    self.mcpu.assertLogContainsRegex(
      "Node did not return forbidden file storage paths")

  @withLockedLU
  def testMasterForbiddenPaths(self, lu):
    lu._VerifyAcceptedFileStoragePaths(
      self.master, {constants.NV_ACCEPTED_STORAGE_PATHS: ["/forbidden"]}, True)
    self.mcpu.assertLogContainsRegex("Found forbidden file storage paths")

  @withLockedLU
  def testMasterSuccess(self, lu):
    lu._VerifyAcceptedFileStoragePaths(
      self.master, {constants.NV_ACCEPTED_STORAGE_PATHS: []}, True)
    self.mcpu.assertLogIsEmpty()
|
1789 |
|
1790 |
|
1791 |
class TestLUClusterVerifyGroupVerifyStoragePaths(
    TestLUClusterVerifyGroupMethods):
  """Tests for the file and shared-file storage path verification."""
  @withLockedLU
  def testVerifyFileStoragePathsSuccess(self, lu):
    lu._VerifyFileStoragePaths(self.master, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testVerifyFileStoragePathsFailure(self, lu):
    ndata = {constants.NV_FILE_STORAGE_PATH: "/fail/path"}
    lu._VerifyFileStoragePaths(self.master, ndata)
    self.mcpu.assertLogContainsRegex(
      "The configured file storage path is unusable")

  @withLockedLU
  def testVerifySharedFileStoragePathsSuccess(self, lu):
    lu._VerifySharedFileStoragePaths(self.master, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testVerifySharedFileStoragePathsFailure(self, lu):
    ndata = {constants.NV_SHARED_FILE_STORAGE_PATH: "/fail/path"}
    lu._VerifySharedFileStoragePaths(self.master, ndata)
    self.mcpu.assertLogContainsRegex(
      "The configured sharedfile storage path is unusable")
|
1816 |
|
1817 |
|
1818 |
class TestLUClusterVerifyGroupVerifyOob(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyOob."""
  @withLockedLU
  def testEmptyResult(self, lu):
    lu._VerifyOob(self.master, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testErrorResults(self, lu):
    # Every reported OOB path problem must show up in the log
    lu._VerifyOob(self.master, {constants.NV_OOB_PATHS: ["path1", "path2"]})
    for path in ("path1", "path2"):
      self.mcpu.assertLogContainsRegex(path)
1829 |
|
1830 |
|
1831 |
class TestLUClusterVerifyGroupUpdateNodeVolumes(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._UpdateNodeVolumes."""
  def setUp(self):
    super(TestLUClusterVerifyGroupUpdateNodeVolumes, self).setUp()
    self.nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)

  @withLockedLU
  def testNoVgName(self, lu):
    # Without a volume group name nothing is logged, but LVM is
    # marked as failed for this node image
    lu._UpdateNodeVolumes(self.master, {}, self.nimg, None)
    self.mcpu.assertLogIsEmpty()
    self.assertTrue(self.nimg.lvm_fail)

  @withLockedLU
  def testErrorMessage(self, lu):
    ndata = {constants.NV_LVLIST: "mock error"}
    lu._UpdateNodeVolumes(self.master, ndata, self.nimg, "mock_vg")
    self.mcpu.assertLogContainsRegex("LVM problem on node: mock error")
    self.assertTrue(self.nimg.lvm_fail)

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    ndata = {constants.NV_LVLIST: [1, 2, 3]}
    lu._UpdateNodeVolumes(self.master, ndata, self.nimg, "mock_vg")
    self.mcpu.assertLogContainsRegex("rpc call to node failed")
    self.assertTrue(self.nimg.lvm_fail)

  @withLockedLU
  def testValidNodeResult(self, lu):
    # An (empty) dict is the expected shape for a valid LV list
    lu._UpdateNodeVolumes(self.master, {constants.NV_LVLIST: {}},
                          self.nimg, "mock_vg")
    self.mcpu.assertLogIsEmpty()
    self.assertFalse(self.nimg.lvm_fail)
1863 |
|
1864 |
|
1865 |
class TestLUClusterVerifyGroupUpdateNodeInstances(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._UpdateNodeInstances."""
  def setUp(self):
    super(TestLUClusterVerifyGroupUpdateNodeInstances, self).setUp()
    self.nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    lu._UpdateNodeInstances(self.master, {}, self.nimg)
    self.mcpu.assertLogContainsRegex("rpc call to node failed")

  @withLockedLU
  def testValidNodeResult(self, lu):
    inst = self.cfg.AddNewInstance()
    ndata = {constants.NV_INSTANCELIST: [inst.name]}
    lu._UpdateNodeInstances(self.master, ndata, self.nimg)
    self.mcpu.assertLogIsEmpty()
|
1883 |
|
1884 |
|
1885 |
class TestLUClusterVerifyGroupUpdateNodeInfo(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._UpdateNodeInfo."""
  def setUp(self):
    super(TestLUClusterVerifyGroupUpdateNodeInfo, self).setUp()
    self.nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    # Minimal well-formed hypervisor info reply
    self.valid_hvresult = {constants.NV_HVINFO: {"memory_free": 1024}}

  @withLockedLU
  def testInvalidHvNodeResult(self, lu):
    # Missing or non-dict hypervisor info must be flagged as an RPC failure
    for ndata in [{}, {constants.NV_HVINFO: ""}]:
      self.mcpu.ClearLogMessages()
      lu._UpdateNodeInfo(self.master, ndata, self.nimg, None)
      self.mcpu.assertLogContainsRegex("rpc call to node failed")

  @withLockedLU
  def testInvalidMemoryFreeHvNodeResult(self, lu):
    ndata = {constants.NV_HVINFO: {"memory_free": "abc"}}
    lu._UpdateNodeInfo(self.master, ndata, self.nimg, None)
    self.mcpu.assertLogContainsRegex(
      "node returned invalid nodeinfo, check hypervisor")

  @withLockedLU
  def testValidHvNodeResult(self, lu):
    lu._UpdateNodeInfo(self.master, self.valid_hvresult, self.nimg, None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testInvalidVgNodeResult(self, lu):
    # Both an empty list and an empty string are invalid VG replies
    for vgdata in [[], ""]:
      self.mcpu.ClearLogMessages()
      ndata = {constants.NV_VGLIST: vgdata}
      ndata.update(self.valid_hvresult)
      lu._UpdateNodeInfo(self.master, ndata, self.nimg, "mock_vg")
      self.mcpu.assertLogContainsRegex(
        "node didn't return data for the volume group 'mock_vg'")

  @withLockedLU
  def testInvalidDiskFreeVgNodeResult(self, lu):
    # Free space must be numeric
    self.valid_hvresult.update({
      constants.NV_VGLIST: {"mock_vg": "abc"}
    })
    lu._UpdateNodeInfo(self.master, self.valid_hvresult, self.nimg, "mock_vg")
    self.mcpu.assertLogContainsRegex(
      "node returned invalid LVM info, check LVM status")

  @withLockedLU
  def testValidVgNodeResult(self, lu):
    self.valid_hvresult.update({
      constants.NV_VGLIST: {"mock_vg": 10000}
    })
    lu._UpdateNodeInfo(self.master, self.valid_hvresult, self.nimg, "mock_vg")
    self.mcpu.assertLogIsEmpty()
|
1937 |
|
1938 |
|
1939 |
class TestLUClusterVerifyGroupCollectDiskInfo(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._CollectDiskInfo."""
  def setUp(self):
    super(TestLUClusterVerifyGroupCollectDiskInfo, self).setUp()

    self.node1 = self.cfg.AddNewNode()
    self.node2 = self.cfg.AddNewNode()
    self.node3 = self.cfg.AddNewNode()

    # One instance per disk template of interest:
    # node1: diskless, node2: plain, node3/node2: drbd primary/secondary
    self.diskless_inst = \
      self.cfg.AddNewInstance(primary_node=self.node1,
                              disk_template=constants.DT_DISKLESS)
    self.plain_inst = \
      self.cfg.AddNewInstance(primary_node=self.node2,
                              disk_template=constants.DT_PLAIN)
    self.drbd_inst = \
      self.cfg.AddNewInstance(primary_node=self.node3,
                              secondary_node=self.node2,
                              disk_template=constants.DT_DRBD8)

    self.node1_img = cluster.LUClusterVerifyGroup.NodeImage(
      uuid=self.node1.uuid)
    self.node1_img.pinst = [self.diskless_inst.uuid]
    self.node1_img.sinst = []
    self.node2_img = cluster.LUClusterVerifyGroup.NodeImage(
      uuid=self.node2.uuid)
    self.node2_img.pinst = [self.plain_inst.uuid]
    self.node2_img.sinst = [self.drbd_inst.uuid]
    self.node3_img = cluster.LUClusterVerifyGroup.NodeImage(
      uuid=self.node3.uuid)
    self.node3_img.pinst = [self.drbd_inst.uuid]
    self.node3_img.sinst = []

    self.node_images = {
      self.node1.uuid: self.node1_img,
      self.node2.uuid: self.node2_img,
      self.node3.uuid: self.node3_img
    }

    self.node_uuids = [self.node1.uuid, self.node2.uuid, self.node3.uuid]

  @withLockedLU
  def testSuccessfulRun(self, lu):
    # node2 carries two disks (plain + drbd secondary), node3 one (drbd)
    self.rpc.call_blockdev_getmirrorstatus_multi.return_value = (
      RpcResultsBuilder()
        .AddSuccessfulNode(self.node2, [(True, ""), (True, "")])
        .AddSuccessfulNode(self.node3, [(True, "")])
        .Build())

    lu._CollectDiskInfo(self.node_uuids, self.node_images,
                        self.cfg.GetAllInstancesInfo())

    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testOfflineAndFailingNodes(self, lu):
    self.rpc.call_blockdev_getmirrorstatus_multi.return_value = (
      RpcResultsBuilder()
        .AddOfflineNode(self.node2)
        .AddFailedNode(self.node3)
        .Build())

    lu._CollectDiskInfo(self.node_uuids, self.node_images,
                        self.cfg.GetAllInstancesInfo())

    self.mcpu.assertLogContainsRegex("while getting disk information")

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    # Malformed per-disk tuples and entries
    self.rpc.call_blockdev_getmirrorstatus_multi.return_value = (
      RpcResultsBuilder()
        .AddSuccessfulNode(self.node2, [(True,), (False,)])
        .AddSuccessfulNode(self.node3, [""])
        .Build())

    lu._CollectDiskInfo(self.node_uuids, self.node_images,
                        self.cfg.GetAllInstancesInfo())
    # logging is not performed through mcpu
    self.mcpu.assertLogIsEmpty()
|
2017 |
|
2018 |
|
2019 |
class TestLUClusterVerifyGroupHooksCallBack(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup.HooksCallBack."""
  def setUp(self):
    super(TestLUClusterVerifyGroupHooksCallBack, self).setUp()

    # Hook feedback is discarded in these tests
    self.feedback_fn = lambda _: None

  def PrepareLU(self, lu):
    super(TestLUClusterVerifyGroupHooksCallBack, self).PrepareLU(lu)

    lu.my_node_uuids = list(self.cfg.GetAllNodesInfo().keys())

  @withLockedLU
  def testEmptyGroup(self, lu):
    # With no nodes in the group the callback must be a no-op
    lu.my_node_uuids = []
    lu.HooksCallBack(constants.HOOKS_PHASE_POST, None, self.feedback_fn, None)

  @withLockedLU
  def testFailedResult(self, lu):
    hook_results = (RpcResultsBuilder(use_node_names=True)
                      .AddFailedNode(self.master)
                      .Build())
    lu.HooksCallBack(constants.HOOKS_PHASE_POST, hook_results,
                     self.feedback_fn, None)
    self.mcpu.assertLogContainsRegex("Communication failure in hooks execution")

  @withLockedLU
  def testOfflineNode(self, lu):
    # Offline nodes are skipped silently
    hook_results = (RpcResultsBuilder(use_node_names=True)
                      .AddOfflineNode(self.master)
                      .Build())
    lu.HooksCallBack(constants.HOOKS_PHASE_POST, hook_results,
                     self.feedback_fn, None)

  @withLockedLU
  def testValidResult(self, lu):
    hook_results = (RpcResultsBuilder(use_node_names=True)
                      .AddSuccessfulNode(self.master,
                                         [("mock_script",
                                           constants.HKR_SUCCESS,
                                           "mock output")])
                      .Build())
    lu.HooksCallBack(constants.HOOKS_PHASE_POST, hook_results,
                     self.feedback_fn, None)

  @withLockedLU
  def testFailedScriptResult(self, lu):
    hook_results = (RpcResultsBuilder(use_node_names=True)
                      .AddSuccessfulNode(self.master,
                                         [("mock_script",
                                           constants.HKR_FAIL,
                                           "mock output")])
                      .Build())
    lu.HooksCallBack(constants.HOOKS_PHASE_POST, hook_results,
                     self.feedback_fn, None)
    self.mcpu.assertLogContainsRegex("Script mock_script failed")
2076 |
|
2077 |
|
2078 |
class TestLUClusterVerifyDisks(CmdlibTestCase):
  """Tests for LUClusterVerifyDisks."""
  def testVerifyDisks(self):
    result = self.ExecOpCode(opcodes.OpClusterVerifyDisks())

    # Exactly one per-group verification job must be submitted
    self.assertEqual(1, len(result["jobs"]))
2084 |
|
2085 |
|
2086 |
# Standard Ganeti test entry point: discovers and runs the test cases above
if __name__ == "__main__":
  testutils.GanetiTestProgram()