root / test / py / cmdlib / cluster_unittest.py @ 5859dad6
History | View | Annotate | Download (75.4 kB)
1 |
#!/usr/bin/python
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2008, 2011, 2012, 2013 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
|
22 |
"""Tests for LUCluster*
|
23 |
|
24 |
"""
|
25 |
|
26 |
import OpenSSL |
27 |
|
28 |
import unittest |
29 |
import operator |
30 |
import os |
31 |
import tempfile |
32 |
import shutil |
33 |
|
34 |
from ganeti import constants |
35 |
from ganeti import errors |
36 |
from ganeti import netutils |
37 |
from ganeti import objects |
38 |
from ganeti import opcodes |
39 |
from ganeti import utils |
40 |
from ganeti import pathutils |
41 |
from ganeti import query |
42 |
from ganeti.cmdlib import cluster |
43 |
from ganeti.hypervisor import hv_xen |
44 |
|
45 |
from testsupport import * |
46 |
|
47 |
import testutils |
48 |
|
49 |
|
50 |
class TestCertVerification(testutils.GanetiTestCase):
  """Exercises cluster._VerifyCertificate with good and bad inputs."""

  def setUp(self):
    testutils.GanetiTestCase.setUp(self)
    self.tmpdir = tempfile.mkdtemp()

  def tearDown(self):
    shutil.rmtree(self.tmpdir)

  def testVerifyCertificate(self):
    """Valid cert passes; missing or non-cert files yield ETYPE_ERROR."""
    # A known-good certificate must be accepted without error.
    cluster._VerifyCertificate(testutils.TestDataFilename("cert1.pem"))

    # A path that does not exist is reported as an error.
    missing = os.path.join(self.tmpdir, "does-not-exist")
    (errcode, _) = cluster._VerifyCertificate(missing)
    self.assertEqual(errcode, cluster.LUClusterVerifyConfig.ETYPE_ERROR)

    # Try to load non-certificate file
    (errcode, _) = cluster._VerifyCertificate(
      testutils.TestDataFilename("bdev-net.txt"))
    self.assertEqual(errcode, cluster.LUClusterVerifyConfig.ETYPE_ERROR)
|
71 |
|
72 |
|
73 |
class TestClusterVerifySsh(unittest.TestCase):
  """Tests for LUClusterVerifyGroup._SelectSshCheckNodes."""

  def testMultipleGroups(self):
    select = cluster.LUClusterVerifyGroup._SelectSshCheckNodes
    # Seven nodes node20..node26 in the group under test; node26 is offline.
    mygroupnodes = [
      objects.Node(name="node2%d" % i, group="my", offline=(i == 6))
      for i in range(7)
    ]
    othernodes = [
      objects.Node(name="node1", group="g1", offline=True),
      objects.Node(name="node2", group="g1", offline=False),
      objects.Node(name="node3", group="g1", offline=False),
      objects.Node(name="node4", group="g1", offline=True),
      objects.Node(name="node5", group="g1", offline=False),
      objects.Node(name="node10", group="xyz", offline=False),
      objects.Node(name="node11", group="xyz", offline=False),
      objects.Node(name="node40", group="alloff", offline=True),
      objects.Node(name="node41", group="alloff", offline=True),
      objects.Node(name="node50", group="aaa", offline=False),
    ]
    nodes = othernodes + mygroupnodes
    # Sanity check on the fixture itself, not on the code under test.
    assert not utils.FindDuplicates(map(operator.attrgetter("name"), nodes))

    (online, perhost) = select(mygroupnodes, "my", nodes)
    self.assertEqual(online, ["node%s" % i for i in range(20, 26)])
    self.assertEqual(set(perhost.keys()), set(online))

    self.assertEqual(perhost, {
      "node20": ["node10", "node2", "node50"],
      "node21": ["node11", "node3", "node50"],
      "node22": ["node10", "node5", "node50"],
      "node23": ["node11", "node2", "node50"],
      "node24": ["node10", "node3", "node50"],
      "node25": ["node11", "node5", "node50"],
      })

  def testSingleGroup(self):
    select = cluster.LUClusterVerifyGroup._SelectSshCheckNodes
    # Only nodes 2 and 3 are online.
    nodes = [
      objects.Node(name="node%d" % i, group="default", offline=(i in (1, 4)))
      for i in range(1, 5)
    ]
    assert not utils.FindDuplicates(map(operator.attrgetter("name"), nodes))

    (online, perhost) = select(nodes, "default", nodes)
    self.assertEqual(online, ["node2", "node3"])
    self.assertEqual(set(perhost.keys()), set(online))

    # With a single group there are no cross-group targets to check.
    self.assertEqual(perhost, {
      "node2": [],
      "node3": [],
      })
130 |
|
131 |
|
132 |
class TestLUClusterActivateMasterIp(CmdlibTestCase):
  """Tests for OpClusterActivateMasterIp."""

  def testSuccess(self):
    self.rpc.call_node_activate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master)

    self.ExecOpCode(opcodes.OpClusterActivateMasterIp())

    self.rpc.call_node_activate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)

  def testFailure(self):
    self.rpc.call_node_activate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)

    # A failing RPC must surface as an OpExecError.
    self.ExecOpCodeExpectOpExecError(opcodes.OpClusterActivateMasterIp())
|
153 |
|
154 |
|
155 |
class TestLUClusterDeactivateMasterIp(CmdlibTestCase):
  """Tests for OpClusterDeactivateMasterIp."""

  def testSuccess(self):
    self.rpc.call_node_deactivate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master)

    self.ExecOpCode(opcodes.OpClusterDeactivateMasterIp())

    self.rpc.call_node_deactivate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)

  def testFailure(self):
    self.rpc.call_node_deactivate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)

    # A failing RPC must surface as an OpExecError.
    self.ExecOpCodeExpectOpExecError(opcodes.OpClusterDeactivateMasterIp())
|
176 |
|
177 |
|
178 |
class TestLUClusterConfigQuery(CmdlibTestCase):
  """Tests for OpClusterConfigQuery field handling."""

  def testInvalidField(self):
    op = opcodes.OpClusterConfigQuery(output_fields=["pinky_bunny"])
    self.ExecOpCodeExpectOpPrereqError(op, "pinky_bunny")

  def testAllFields(self):
    """Requesting every known field returns one value per field."""
    self.rpc.call_get_watcher_pause.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, -1)

    result = self.ExecOpCode(
      opcodes.OpClusterConfigQuery(output_fields=query.CLUSTER_FIELDS.keys()))

    self.assertEqual(1, self.rpc.call_get_watcher_pause.call_count)
    self.assertEqual(len(result), len(query.CLUSTER_FIELDS))

  # NOTE(review): "Empyt" typo kept — the method name is the public test id.
  def testEmpytFields(self):
    self.ExecOpCode(opcodes.OpClusterConfigQuery(output_fields=[]))

    # No fields requested, so the watcher-pause RPC must not fire.
    self.assertFalse(self.rpc.call_get_watcher_pause.called)
202 |
|
203 |
|
204 |
class TestLUClusterDestroy(CmdlibTestCase):
  """Tests for OpClusterDestroy prerequisites and hooks.

  The expected-error patterns are regexes; they are written as raw strings
  because "\(" in a plain string literal is an invalid escape sequence
  (DeprecationWarning since Python 3.6).  The string values are unchanged.
  """

  def testExistingNodes(self):
    op = opcodes.OpClusterDestroy()

    self.cfg.AddNewNode()
    self.cfg.AddNewNode()

    self.ExecOpCodeExpectOpPrereqError(op, r"still 2 node\(s\)")

  def testExistingInstances(self):
    op = opcodes.OpClusterDestroy()

    self.cfg.AddNewInstance()
    self.cfg.AddNewInstance()

    self.ExecOpCodeExpectOpPrereqError(op, r"still 2 instance\(s\)")

  def testEmptyCluster(self):
    op = opcodes.OpClusterDestroy()

    self.ExecOpCode(op)

    # Destroying an empty cluster runs the post-phase hook on the master.
    self.assertSingleHooksCall([self.master.name],
                               "cluster-destroy",
                               constants.HOOKS_PHASE_POST)
229 |
|
230 |
|
231 |
class TestLUClusterPostInit(CmdlibTestCase):
  """Tests for OpClusterPostInit."""

  # NOTE(review): "Execuion" typo kept — the method name is the public test id.
  def testExecuion(self):
    self.ExecOpCode(opcodes.OpClusterPostInit())

    # Post-init runs the cluster-init hook on the master only.
    self.assertSingleHooksCall([self.master.name],
                               "cluster-init",
                               constants.HOOKS_PHASE_POST)
240 |
|
241 |
|
242 |
class TestLUClusterQuery(CmdlibTestCase):
  """Tests for OpClusterQuery."""

  def testSimpleInvocation(self):
    self.ExecOpCode(opcodes.OpClusterQuery())

  def testIPv6Cluster(self):
    # Switch the cluster to IPv6 before querying.
    self.cluster.primary_ip_family = netutils.IP6Address.family

    self.ExecOpCode(opcodes.OpClusterQuery())
|
254 |
|
255 |
|
256 |
class TestLUClusterRedistConf(CmdlibTestCase):
  """Smoke test for OpClusterRedistConf."""

  def testSimpleInvocation(self):
    self.ExecOpCode(opcodes.OpClusterRedistConf())
|
261 |
|
262 |
|
263 |
class TestLUClusterRename(CmdlibTestCase):
  """Tests for OpClusterRename."""

  NEW_NAME = "new-name.example.com"
  NEW_IP = "203.0.113.100"

  def _MockNewHostname(self):
    # Resolve the new name to the (test-net) IP for the rename paths.
    self.netutils_mod.GetHostname.return_value = \
      HostnameMock(self.NEW_NAME, self.NEW_IP)

  def testNoChanges(self):
    op = opcodes.OpClusterRename(name=self.cfg.GetClusterName())

    self.ExecOpCodeExpectOpPrereqError(op, "name nor the IP address")

  def testReachableIp(self):
    op = opcodes.OpClusterRename(name=self.NEW_NAME)

    self._MockNewHostname()
    # The target IP already answers: rename must be refused.
    self.netutils_mod.TcpPing.return_value = True

    self.ExecOpCodeExpectOpPrereqError(op, "is reachable on the network")

  def testValidRename(self):
    op = opcodes.OpClusterRename(name=self.NEW_NAME)

    self._MockNewHostname()

    self.ExecOpCode(op)

    # Rename rewrites known_hosts and bounces the master IP.
    self.assertEqual(1, self.ssh_mod.WriteKnownHostsFile.call_count)
    self.rpc.call_node_deactivate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)
    self.rpc.call_node_activate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)

  def testRenameOfflineMaster(self):
    op = opcodes.OpClusterRename(name=self.NEW_NAME)

    self.master.offline = True
    self._MockNewHostname()

    self.ExecOpCode(op)
|
303 |
|
304 |
|
305 |
class TestLUClusterRepairDiskSizes(CmdlibTestCase):
  """Tests for OpClusterRepairDiskSizes."""

  # One GiB in bytes; the size reported by the mocked blockdev RPC.
  _ONE_GIB = 1024 * 1024 * 1024

  def testNoInstances(self):
    self.ExecOpCode(opcodes.OpClusterRepairDiskSizes())

  def _SetUpInstanceSingleDisk(self, dev_type=constants.DT_PLAIN):
    """Create one instance with a single disk; returns (instance, disk)."""
    primary = self.master
    secondary = self.cfg.AddNewNode()

    disk = self.cfg.CreateDisk(dev_type=dev_type,
                               primary_node=primary,
                               secondary_node=secondary)
    inst = self.cfg.AddNewInstance(disks=[disk])

    return (inst, disk)

  def testSingleInstanceOnFailingNode(self):
    (inst, _) = self._SetUpInstanceSingleDisk()

    self.rpc.call_blockdev_getdimensions.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)

    self.ExecOpCode(opcodes.OpClusterRepairDiskSizes(instances=[inst.name]))

    self.mcpu.assertLogContainsRegex("Failure in blockdev_getdimensions")

  def _ExecOpClusterRepairDiskSizes(self, node_data):
    """Run the opcode over all instances with mocked dimension data."""
    # not specifying instances repairs all
    self.rpc.call_blockdev_getdimensions.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, node_data)

    return self.ExecOpCode(opcodes.OpClusterRepairDiskSizes())

  def testInvalidResultData(self):
    bad_results = [[], [None], ["invalid"], [("still", "invalid")]]
    for data in bad_results:
      self.ResetMocks()

      self._SetUpInstanceSingleDisk()
      self._ExecOpClusterRepairDiskSizes(data)

      self.mcpu.assertLogContainsRegex("ignoring")

  def testCorrectSize(self):
    self._SetUpInstanceSingleDisk()
    changed = self._ExecOpClusterRepairDiskSizes([(self._ONE_GIB, None)])
    self.mcpu.assertLogIsEmpty()
    self.assertEqual(0, len(changed))

  def testWrongSize(self):
    self._SetUpInstanceSingleDisk()
    # Reported size (512 MiB) differs from the configured one.
    changed = self._ExecOpClusterRepairDiskSizes([(512 * 1024 * 1024, None)])
    self.assertEqual(1, len(changed))

  def testCorrectDRBD(self):
    self._SetUpInstanceSingleDisk(dev_type=constants.DT_DRBD8)
    changed = self._ExecOpClusterRepairDiskSizes([(self._ONE_GIB, None)])
    self.mcpu.assertLogIsEmpty()
    self.assertEqual(0, len(changed))

  def testWrongDRBDChild(self):
    (_, disk) = self._SetUpInstanceSingleDisk(dev_type=constants.DT_DRBD8)
    # Shrink the backing child so its size disagrees with the RPC result.
    disk.children[0].size = 512
    changed = self._ExecOpClusterRepairDiskSizes([(self._ONE_GIB, None)])
    self.assertEqual(1, len(changed))

  def testExclusiveStorageInvalidResultData(self):
    self._SetUpInstanceSingleDisk()
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    # Exclusive storage requires spindle info; None is not valid here.
    self._ExecOpClusterRepairDiskSizes([(self._ONE_GIB, None)])

    self.mcpu.assertLogContainsRegex(
      "did not return valid spindles information")

  def testExclusiveStorageCorrectSpindles(self):
    (_, disk) = self._SetUpInstanceSingleDisk()
    disk.spindles = 1
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    changed = self._ExecOpClusterRepairDiskSizes([(self._ONE_GIB, 1)])
    self.assertEqual(0, len(changed))

  def testExclusiveStorageWrongSpindles(self):
    self._SetUpInstanceSingleDisk()
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    # Configured disk has no spindles set, but the node reports 1.
    changed = self._ExecOpClusterRepairDiskSizes([(self._ONE_GIB, 1)])
    self.assertEqual(1, len(changed))
396 |
|
397 |
|
398 |
class TestLUClusterSetParams(CmdlibTestCase): |
399 |
UID_POOL = [(10, 1000)] |
400 |
|
401 |
def testUidPool(self): |
402 |
op = opcodes.OpClusterSetParams(uid_pool=self.UID_POOL)
|
403 |
self.ExecOpCode(op)
|
404 |
self.assertEqual(self.UID_POOL, self.cluster.uid_pool) |
405 |
|
406 |
def testAddUids(self): |
407 |
old_pool = [(1, 9)] |
408 |
self.cluster.uid_pool = list(old_pool) |
409 |
op = opcodes.OpClusterSetParams(add_uids=self.UID_POOL)
|
410 |
self.ExecOpCode(op)
|
411 |
self.assertEqual(set(self.UID_POOL + old_pool), |
412 |
set(self.cluster.uid_pool)) |
413 |
|
414 |
def testRemoveUids(self): |
415 |
additional_pool = [(1, 9)] |
416 |
self.cluster.uid_pool = self.UID_POOL + additional_pool |
417 |
op = opcodes.OpClusterSetParams(remove_uids=self.UID_POOL)
|
418 |
self.ExecOpCode(op)
|
419 |
self.assertEqual(additional_pool, self.cluster.uid_pool) |
420 |
|
421 |
def testMasterNetmask(self): |
422 |
op = opcodes.OpClusterSetParams(master_netmask=26)
|
423 |
self.ExecOpCode(op)
|
424 |
self.assertEqual(26, self.cluster.master_netmask) |
425 |
|
426 |
def testInvalidDiskparams(self): |
427 |
for diskparams in [{constants.DT_DISKLESS: {constants.LV_STRIPES: 0}}, |
428 |
{constants.DT_DRBD8: {constants.RBD_POOL: "pool"}},
|
429 |
{constants.DT_DRBD8: {constants.RBD_ACCESS: "bunny"}}]:
|
430 |
self.ResetMocks()
|
431 |
op = opcodes.OpClusterSetParams(diskparams=diskparams) |
432 |
self.ExecOpCodeExpectOpPrereqError(op, "verify diskparams") |
433 |
|
434 |
def testValidDiskparams(self): |
435 |
diskparams = {constants.DT_RBD: {constants.RBD_POOL: "mock_pool",
|
436 |
constants.RBD_ACCESS: "kernelspace"}}
|
437 |
op = opcodes.OpClusterSetParams(diskparams=diskparams) |
438 |
self.ExecOpCode(op)
|
439 |
self.assertEqual(diskparams[constants.DT_RBD],
|
440 |
self.cluster.diskparams[constants.DT_RBD])
|
441 |
|
442 |
def testMinimalDiskparams(self): |
443 |
diskparams = {constants.DT_RBD: {constants.RBD_POOL: "mock_pool"}}
|
444 |
self.cluster.diskparams = {}
|
445 |
op = opcodes.OpClusterSetParams(diskparams=diskparams) |
446 |
self.ExecOpCode(op)
|
447 |
self.assertEqual(diskparams, self.cluster.diskparams) |
448 |
|
449 |
def testValidDiskparamsAccess(self): |
450 |
for value in constants.DISK_VALID_ACCESS_MODES: |
451 |
self.ResetMocks()
|
452 |
op = opcodes.OpClusterSetParams(diskparams={ |
453 |
constants.DT_RBD: {constants.RBD_ACCESS: value} |
454 |
}) |
455 |
self.ExecOpCode(op)
|
456 |
got = self.cluster.diskparams[constants.DT_RBD][constants.RBD_ACCESS]
|
457 |
self.assertEqual(value, got)
|
458 |
|
459 |
def testInvalidDiskparamsAccess(self): |
460 |
for value in ["default", "pinky_bunny"]: |
461 |
self.ResetMocks()
|
462 |
op = opcodes.OpClusterSetParams(diskparams={ |
463 |
constants.DT_RBD: {constants.RBD_ACCESS: value} |
464 |
}) |
465 |
self.ExecOpCodeExpectOpPrereqError(op, "Invalid value of 'rbd:access'") |
466 |
|
467 |
def testUnsetDrbdHelperWithDrbdDisks(self): |
468 |
self.cfg.AddNewInstance(disks=[
|
469 |
self.cfg.CreateDisk(dev_type=constants.DT_DRBD8, create_nodes=True)]) |
470 |
op = opcodes.OpClusterSetParams(drbd_helper="")
|
471 |
self.ExecOpCodeExpectOpPrereqError(op, "Cannot disable drbd helper") |
472 |
|
473 |
def testFileStorageDir(self): |
474 |
op = opcodes.OpClusterSetParams(file_storage_dir="/random/path")
|
475 |
self.ExecOpCode(op)
|
476 |
|
477 |
def testSetFileStorageDirToCurrentValue(self): |
478 |
op = opcodes.OpClusterSetParams( |
479 |
file_storage_dir=self.cluster.file_storage_dir)
|
480 |
self.ExecOpCode(op)
|
481 |
|
482 |
self.mcpu.assertLogContainsRegex("file storage dir already set to value") |
483 |
|
484 |
def testUnsetFileStorageDirFileStorageEnabled(self): |
485 |
self.cfg.SetEnabledDiskTemplates([constants.DT_FILE])
|
486 |
op = opcodes.OpClusterSetParams(file_storage_dir='')
|
487 |
self.ExecOpCodeExpectOpPrereqError(op, "Unsetting the 'file' storage") |
488 |
|
489 |
def testUnsetFileStorageDirFileStorageDisabled(self): |
490 |
self.cfg.SetEnabledDiskTemplates([constants.DT_PLAIN])
|
491 |
op = opcodes.OpClusterSetParams(file_storage_dir='')
|
492 |
self.ExecOpCode(op)
|
493 |
|
494 |
def testSetFileStorageDirFileStorageDisabled(self): |
495 |
self.cfg.SetEnabledDiskTemplates([constants.DT_PLAIN])
|
496 |
op = opcodes.OpClusterSetParams(file_storage_dir='/some/path/')
|
497 |
self.ExecOpCode(op)
|
498 |
self.mcpu.assertLogContainsRegex("although file storage is not enabled") |
499 |
|
500 |
def testValidDrbdHelper(self): |
501 |
node1 = self.cfg.AddNewNode()
|
502 |
node1.offline = True
|
503 |
self.rpc.call_drbd_helper.return_value = \
|
504 |
self.RpcResultsBuilder() \
|
505 |
.AddSuccessfulNode(self.master, "/bin/true") \ |
506 |
.AddOfflineNode(node1) \ |
507 |
.Build() |
508 |
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
|
509 |
self.ExecOpCode(op)
|
510 |
self.mcpu.assertLogContainsRegex("Not checking drbd helper on offline node") |
511 |
|
512 |
def testDrbdHelperFailingNode(self): |
513 |
self.rpc.call_drbd_helper.return_value = \
|
514 |
self.RpcResultsBuilder() \
|
515 |
.AddFailedNode(self.master) \
|
516 |
.Build() |
517 |
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
|
518 |
self.ExecOpCodeExpectOpPrereqError(op, "Error checking drbd helper") |
519 |
|
520 |
def testInvalidDrbdHelper(self): |
521 |
self.rpc.call_drbd_helper.return_value = \
|
522 |
self.RpcResultsBuilder() \
|
523 |
.AddSuccessfulNode(self.master, "/bin/false") \ |
524 |
.Build() |
525 |
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
|
526 |
self.ExecOpCodeExpectOpPrereqError(op, "drbd helper is /bin/false") |
527 |
|
528 |
def testDrbdHelperWithoutDrbdDiskTemplate(self): |
529 |
drbd_helper = "/bin/random_helper"
|
530 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
531 |
self.rpc.call_drbd_helper.return_value = \
|
532 |
self.RpcResultsBuilder() \
|
533 |
.AddSuccessfulNode(self.master, drbd_helper) \
|
534 |
.Build() |
535 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
536 |
self.ExecOpCode(op)
|
537 |
|
538 |
self.mcpu.assertLogContainsRegex("but did not enable") |
539 |
|
540 |
def testResetDrbdHelperDrbdDisabled(self): |
541 |
drbd_helper = ""
|
542 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
543 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
544 |
self.ExecOpCode(op)
|
545 |
|
546 |
self.assertEqual(None, self.cluster.drbd_usermode_helper) |
547 |
|
548 |
def testResetDrbdHelperDrbdEnabled(self): |
549 |
drbd_helper = ""
|
550 |
self.cluster.enabled_disk_templates = [constants.DT_DRBD8]
|
551 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
552 |
self.ExecOpCodeExpectOpPrereqError(
|
553 |
op, "Cannot disable drbd helper while DRBD is enabled.")
|
554 |
|
555 |
def testEnableDrbdNoHelper(self): |
556 |
self.cluster.enabled_disk_templates = [constants.DT_DISKLESS]
|
557 |
self.cluster.drbd_usermode_helper = None |
558 |
enabled_disk_templates = [constants.DT_DRBD8] |
559 |
op = opcodes.OpClusterSetParams( |
560 |
enabled_disk_templates=enabled_disk_templates) |
561 |
self.ExecOpCodeExpectOpPrereqError(
|
562 |
op, "Cannot enable DRBD without a DRBD usermode helper set")
|
563 |
|
564 |
def testEnableDrbdHelperSet(self): |
565 |
drbd_helper = "/bin/random_helper"
|
566 |
self.rpc.call_drbd_helper.return_value = \
|
567 |
self.RpcResultsBuilder() \
|
568 |
.AddSuccessfulNode(self.master, drbd_helper) \
|
569 |
.Build() |
570 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
571 |
self.cluster.drbd_usermode_helper = drbd_helper
|
572 |
enabled_disk_templates = [constants.DT_DRBD8] |
573 |
op = opcodes.OpClusterSetParams( |
574 |
enabled_disk_templates=enabled_disk_templates, |
575 |
ipolicy={constants.IPOLICY_DTS: enabled_disk_templates}) |
576 |
self.ExecOpCode(op)
|
577 |
|
578 |
self.assertEqual(drbd_helper, self.cluster.drbd_usermode_helper) |
579 |
|
580 |
def testDrbdHelperAlreadySet(self): |
581 |
drbd_helper = "/bin/true"
|
582 |
self.rpc.call_drbd_helper.return_value = \
|
583 |
self.RpcResultsBuilder() \
|
584 |
.AddSuccessfulNode(self.master, "/bin/true") \ |
585 |
.Build() |
586 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
587 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
588 |
self.ExecOpCode(op)
|
589 |
|
590 |
self.assertEqual(drbd_helper, self.cluster.drbd_usermode_helper) |
591 |
self.mcpu.assertLogContainsRegex("DRBD helper already in desired state") |
592 |
|
593 |
def testSetDrbdHelper(self): |
594 |
drbd_helper = "/bin/true"
|
595 |
self.rpc.call_drbd_helper.return_value = \
|
596 |
self.RpcResultsBuilder() \
|
597 |
.AddSuccessfulNode(self.master, "/bin/true") \ |
598 |
.Build() |
599 |
self.cluster.drbd_usermode_helper = "/bin/false" |
600 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DRBD8])
|
601 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
602 |
self.ExecOpCode(op)
|
603 |
|
604 |
self.assertEqual(drbd_helper, self.cluster.drbd_usermode_helper) |
605 |
|
606 |
def testBeparams(self): |
607 |
beparams = {constants.BE_VCPUS: 32}
|
608 |
op = opcodes.OpClusterSetParams(beparams=beparams) |
609 |
self.ExecOpCode(op)
|
610 |
self.assertEqual(32, self.cluster |
611 |
.beparams[constants.PP_DEFAULT][constants.BE_VCPUS]) |
612 |
|
613 |
def testNdparams(self): |
614 |
ndparams = {constants.ND_EXCLUSIVE_STORAGE: True}
|
615 |
op = opcodes.OpClusterSetParams(ndparams=ndparams) |
616 |
self.ExecOpCode(op)
|
617 |
self.assertEqual(True, self.cluster |
618 |
.ndparams[constants.ND_EXCLUSIVE_STORAGE]) |
619 |
|
620 |
def testNdparamsResetOobProgram(self): |
621 |
ndparams = {constants.ND_OOB_PROGRAM: ""}
|
622 |
op = opcodes.OpClusterSetParams(ndparams=ndparams) |
623 |
self.ExecOpCode(op)
|
624 |
self.assertEqual(constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM],
|
625 |
self.cluster.ndparams[constants.ND_OOB_PROGRAM])
|
626 |
|
627 |
def testHvState(self): |
628 |
hv_state = {constants.HT_FAKE: {constants.HVST_CPU_TOTAL: 8}}
|
629 |
op = opcodes.OpClusterSetParams(hv_state=hv_state) |
630 |
self.ExecOpCode(op)
|
631 |
self.assertEqual(8, self.cluster.hv_state_static |
632 |
[constants.HT_FAKE][constants.HVST_CPU_TOTAL]) |
633 |
|
634 |
def testDiskState(self): |
635 |
disk_state = { |
636 |
constants.DT_PLAIN: { |
637 |
"mock_vg": {constants.DS_DISK_TOTAL: 10} |
638 |
} |
639 |
} |
640 |
op = opcodes.OpClusterSetParams(disk_state=disk_state) |
641 |
self.ExecOpCode(op)
|
642 |
self.assertEqual(10, self.cluster |
643 |
.disk_state_static[constants.DT_PLAIN]["mock_vg"]
|
644 |
[constants.DS_DISK_TOTAL]) |
645 |
|
646 |
def testDefaultIPolicy(self): |
647 |
ipolicy = constants.IPOLICY_DEFAULTS |
648 |
op = opcodes.OpClusterSetParams(ipolicy=ipolicy) |
649 |
self.ExecOpCode(op)
|
650 |
|
651 |
def testIPolicyNewViolation(self): |
652 |
import ganeti.constants as C |
653 |
ipolicy = C.IPOLICY_DEFAULTS |
654 |
ipolicy[C.ISPECS_MINMAX][0][C.ISPECS_MIN][C.ISPEC_MEM_SIZE] = 128 |
655 |
ipolicy[C.ISPECS_MINMAX][0][C.ISPECS_MAX][C.ISPEC_MEM_SIZE] = 128 |
656 |
|
657 |
self.cfg.AddNewInstance(beparams={C.BE_MINMEM: 512, C.BE_MAXMEM: 512}) |
658 |
op = opcodes.OpClusterSetParams(ipolicy=ipolicy) |
659 |
self.ExecOpCode(op)
|
660 |
|
661 |
self.mcpu.assertLogContainsRegex("instances violate them") |
662 |
|
663 |
def testNicparamsNoInstance(self): |
664 |
nicparams = { |
665 |
constants.NIC_LINK: "mock_bridge"
|
666 |
} |
667 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
668 |
self.ExecOpCode(op)
|
669 |
|
670 |
self.assertEqual("mock_bridge", |
671 |
self.cluster.nicparams
|
672 |
[constants.PP_DEFAULT][constants.NIC_LINK]) |
673 |
|
674 |
def testNicparamsInvalidConf(self): |
675 |
nicparams = { |
676 |
constants.NIC_MODE: constants.NIC_MODE_BRIDGED, |
677 |
constants.NIC_LINK: ""
|
678 |
} |
679 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
680 |
self.ExecOpCodeExpectException(op, errors.ConfigurationError, "NIC link") |
681 |
|
682 |
def testNicparamsInvalidInstanceConf(self): |
683 |
nicparams = { |
684 |
constants.NIC_MODE: constants.NIC_MODE_BRIDGED, |
685 |
constants.NIC_LINK: "mock_bridge"
|
686 |
} |
687 |
self.cfg.AddNewInstance(nics=[
|
688 |
self.cfg.CreateNic(nicparams={constants.NIC_LINK: None})]) |
689 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
690 |
self.ExecOpCodeExpectOpPrereqError(op, "Missing bridged NIC link") |
691 |
|
692 |
def testNicparamsMissingIp(self): |
693 |
nicparams = { |
694 |
constants.NIC_MODE: constants.NIC_MODE_ROUTED |
695 |
} |
696 |
self.cfg.AddNewInstance()
|
697 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
698 |
self.ExecOpCodeExpectOpPrereqError(op, "routed NIC with no ip address") |
699 |
|
700 |
def testNicparamsWithInstance(self): |
701 |
nicparams = { |
702 |
constants.NIC_LINK: "mock_bridge"
|
703 |
} |
704 |
self.cfg.AddNewInstance()
|
705 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
706 |
self.ExecOpCode(op)
|
707 |
|
708 |
def testDefaultHvparams(self): |
709 |
hvparams = constants.HVC_DEFAULTS |
710 |
op = opcodes.OpClusterSetParams(hvparams=hvparams) |
711 |
self.ExecOpCode(op)
|
712 |
|
713 |
self.assertEqual(hvparams, self.cluster.hvparams) |
714 |
|
715 |
def testMinimalHvparams(self): |
716 |
hvparams = { |
717 |
constants.HT_FAKE: { |
718 |
constants.HV_MIGRATION_MODE: constants.HT_MIGRATION_NONLIVE |
719 |
} |
720 |
} |
721 |
self.cluster.hvparams = {}
|
722 |
op = opcodes.OpClusterSetParams(hvparams=hvparams) |
723 |
self.ExecOpCode(op)
|
724 |
|
725 |
self.assertEqual(hvparams, self.cluster.hvparams) |
726 |
|
727 |
def testOsHvp(self): |
728 |
os_hvp = { |
729 |
"mocked_os": {
|
730 |
constants.HT_FAKE: { |
731 |
constants.HV_MIGRATION_MODE: constants.HT_MIGRATION_NONLIVE |
732 |
} |
733 |
}, |
734 |
"other_os": constants.HVC_DEFAULTS
|
735 |
} |
736 |
op = opcodes.OpClusterSetParams(os_hvp=os_hvp) |
737 |
self.ExecOpCode(op)
|
738 |
|
739 |
self.assertEqual(constants.HT_MIGRATION_NONLIVE,
|
740 |
self.cluster.os_hvp["mocked_os"][constants.HT_FAKE] |
741 |
[constants.HV_MIGRATION_MODE]) |
742 |
self.assertEqual(constants.HVC_DEFAULTS, self.cluster.os_hvp["other_os"]) |
743 |
|
744 |
def testRemoveOsHvp(self): |
745 |
os_hvp = {"mocked_os": {constants.HT_FAKE: None}} |
746 |
op = opcodes.OpClusterSetParams(os_hvp=os_hvp) |
747 |
self.ExecOpCode(op)
|
748 |
|
749 |
assert constants.HT_FAKE not in self.cluster.os_hvp["mocked_os"] |
750 |
|
751 |
def testDefaultOsHvp(self): |
752 |
os_hvp = {"mocked_os": constants.HVC_DEFAULTS.copy()}
|
753 |
self.cluster.os_hvp = {"mocked_os": {}} |
754 |
op = opcodes.OpClusterSetParams(os_hvp=os_hvp) |
755 |
self.ExecOpCode(op)
|
756 |
|
757 |
self.assertEqual(os_hvp, self.cluster.os_hvp) |
758 |
|
759 |
def testOsparams(self): |
760 |
osparams = { |
761 |
"mocked_os": {
|
762 |
"param1": "value1", |
763 |
"param2": None |
764 |
}, |
765 |
"other_os": {
|
766 |
"param1": None |
767 |
} |
768 |
} |
769 |
self.cluster.osparams = {"other_os": {"param1": "value1"}} |
770 |
op = opcodes.OpClusterSetParams(osparams=osparams) |
771 |
self.ExecOpCode(op)
|
772 |
|
773 |
self.assertEqual({"mocked_os": {"param1": "value1"}}, self.cluster.osparams) |
774 |
|
775 |
def testEnabledHypervisors(self): |
776 |
enabled_hypervisors = [constants.HT_XEN_HVM, constants.HT_XEN_PVM] |
777 |
op = opcodes.OpClusterSetParams(enabled_hypervisors=enabled_hypervisors) |
778 |
self.ExecOpCode(op)
|
779 |
|
780 |
self.assertEqual(enabled_hypervisors, self.cluster.enabled_hypervisors) |
781 |
|
782 |
def testEnabledHypervisorsWithoutHypervisorParams(self): |
783 |
enabled_hypervisors = [constants.HT_FAKE] |
784 |
self.cluster.hvparams = {}
|
785 |
op = opcodes.OpClusterSetParams(enabled_hypervisors=enabled_hypervisors) |
786 |
self.ExecOpCode(op)
|
787 |
|
788 |
self.assertEqual(enabled_hypervisors, self.cluster.enabled_hypervisors) |
789 |
self.assertEqual(constants.HVC_DEFAULTS[constants.HT_FAKE],
|
790 |
self.cluster.hvparams[constants.HT_FAKE])
|
791 |
|
792 |
@testutils.patch_object(utils, "FindFile") |
793 |
def testValidDefaultIallocator(self, find_file_mock): |
794 |
find_file_mock.return_value = "/random/path"
|
795 |
default_iallocator = "/random/path"
|
796 |
op = opcodes.OpClusterSetParams(default_iallocator=default_iallocator) |
797 |
self.ExecOpCode(op)
|
798 |
|
799 |
self.assertEqual(default_iallocator, self.cluster.default_iallocator) |
800 |
|
801 |
@testutils.patch_object(utils, "FindFile") |
802 |
def testInvalidDefaultIallocator(self, find_file_mock): |
803 |
find_file_mock.return_value = None
|
804 |
default_iallocator = "/random/path"
|
805 |
op = opcodes.OpClusterSetParams(default_iallocator=default_iallocator) |
806 |
self.ExecOpCodeExpectOpPrereqError(op, "Invalid default iallocator script") |
807 |
|
808 |
def testEnabledDiskTemplates(self): |
809 |
enabled_disk_templates = [constants.DT_DISKLESS, constants.DT_PLAIN] |
810 |
op = opcodes.OpClusterSetParams( |
811 |
enabled_disk_templates=enabled_disk_templates, |
812 |
ipolicy={constants.IPOLICY_DTS: enabled_disk_templates}) |
813 |
self.ExecOpCode(op)
|
814 |
|
815 |
self.assertEqual(enabled_disk_templates,
|
816 |
self.cluster.enabled_disk_templates)
|
817 |
|
818 |
def testEnabledDiskTemplatesVsIpolicy(self): |
819 |
enabled_disk_templates = [constants.DT_DISKLESS, constants.DT_PLAIN] |
820 |
op = opcodes.OpClusterSetParams( |
821 |
enabled_disk_templates=enabled_disk_templates, |
822 |
ipolicy={constants.IPOLICY_DTS: [constants.DT_FILE]}) |
823 |
self.ExecOpCodeExpectOpPrereqError(op, "but not enabled on the cluster") |
824 |
|
825 |
  def testDisablingDiskTemplatesOfInstances(self):
    """A disk template still used by an instance cannot be disabled."""
    old_disk_templates = [constants.DT_DISKLESS, constants.DT_PLAIN]
    self.cfg.SetEnabledDiskTemplates(old_disk_templates)
    # Create an instance using DT_PLAIN, then try to drop DT_PLAIN
    self.cfg.AddNewInstance(
      disks=[self.cfg.CreateDisk(dev_type=constants.DT_PLAIN)])
    new_disk_templates = [constants.DT_DISKLESS, constants.DT_DRBD8]
    op = opcodes.OpClusterSetParams(
      enabled_disk_templates=new_disk_templates,
      ipolicy={constants.IPOLICY_DTS: new_disk_templates})
    self.ExecOpCodeExpectOpPrereqError(op, "least one instance using it")
835 |
|
836 |
  def testEnabledDiskTemplatesWithoutVgName(self):
    """Enabling an LVM-based template without a volume group fails."""
    enabled_disk_templates = [constants.DT_PLAIN]
    self.cluster.volume_group_name = None
    op = opcodes.OpClusterSetParams(
      enabled_disk_templates=enabled_disk_templates)
    self.ExecOpCodeExpectOpPrereqError(op, "specify a volume group")
842 |
|
843 |
  def testDisableDiskTemplateWithExistingInstance(self):
    """Disabling the template of an existing instance is rejected."""
    enabled_disk_templates = [constants.DT_DISKLESS]
    self.cfg.AddNewInstance(
      disks=[self.cfg.CreateDisk(dev_type=constants.DT_PLAIN)])
    op = opcodes.OpClusterSetParams(
      enabled_disk_templates=enabled_disk_templates,
      ipolicy={constants.IPOLICY_DTS: enabled_disk_templates})
    self.ExecOpCodeExpectOpPrereqError(op, "Cannot disable disk template")
851 |
|
852 |
  def testVgNameNoLvmDiskTemplateEnabled(self):
    """Setting a VG name while no LVM template is enabled still works."""
    vg_name = "test_vg"
    self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
    op = opcodes.OpClusterSetParams(vg_name=vg_name)
    self.ExecOpCode(op)

    self.assertEqual(vg_name, self.cluster.volume_group_name)
    self.mcpu.assertLogIsEmpty()
|
860 |
|
861 |
def testUnsetVgNameWithLvmDiskTemplateEnabled(self): |
862 |
vg_name = ""
|
863 |
self.cluster.enabled_disk_templates = [constants.DT_PLAIN]
|
864 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
865 |
self.ExecOpCodeExpectOpPrereqError(op, "Cannot unset volume group") |
866 |
|
867 |
  def testUnsetVgNameWithLvmInstance(self):
    """Clearing the VG while an LVM-backed instance exists is rejected."""
    vg_name = ""
    self.cfg.AddNewInstance(
      disks=[self.cfg.CreateDisk(dev_type=constants.DT_PLAIN)])
    op = opcodes.OpClusterSetParams(vg_name=vg_name)
    self.ExecOpCodeExpectOpPrereqError(op, "Cannot unset volume group")
873 |
|
874 |
  def testUnsetVgNameWithNoLvmDiskTemplateEnabled(self):
    """Clearing the VG is allowed when no LVM disk template is enabled."""
    vg_name = ""
    self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
    op = opcodes.OpClusterSetParams(vg_name=vg_name)
    self.ExecOpCode(op)

    # An empty vg_name unsets the volume group (stored as None)
    self.assertEqual(None, self.cluster.volume_group_name)
881 |
|
882 |
def testVgNameToOldName(self): |
883 |
vg_name = self.cluster.volume_group_name
|
884 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
885 |
self.ExecOpCode(op)
|
886 |
|
887 |
self.mcpu.assertLogContainsRegex("already in desired state") |
888 |
|
889 |
  def testVgNameWithFailingNode(self):
    """A node failing the vg_list RPC produces a warning, not an error."""
    vg_name = "test_vg"
    op = opcodes.OpClusterSetParams(vg_name=vg_name)
    self.rpc.call_vg_list.return_value = \
      self.RpcResultsBuilder() \
        .AddFailedNode(self.master) \
        .Build()
    self.ExecOpCode(op)

    self.mcpu.assertLogContainsRegex("Error while gathering data on node")
899 |
|
900 |
  def testVgNameWithValidNode(self):
    """Setting the VG succeeds when the node reports a large enough VG."""
    vg_name = "test_vg"
    op = opcodes.OpClusterSetParams(vg_name=vg_name)
    self.rpc.call_vg_list.return_value = \
      self.RpcResultsBuilder() \
        .AddSuccessfulNode(self.master, {vg_name: 1024 * 1024}) \
        .Build()
    self.ExecOpCode(op)
|
908 |
|
909 |
  def testVgNameWithTooSmallNode(self):
    """Setting the VG fails when a node reports a too-small VG size."""
    vg_name = "test_vg"
    op = opcodes.OpClusterSetParams(vg_name=vg_name)
    self.rpc.call_vg_list.return_value = \
      self.RpcResultsBuilder() \
        .AddSuccessfulNode(self.master, {vg_name: 1}) \
        .Build()
    self.ExecOpCodeExpectOpPrereqError(op, "too small")
917 |
|
918 |
  def testMiscParameters(self):
    """Miscellaneous scalar cluster parameters are stored verbatim."""
    op = opcodes.OpClusterSetParams(candidate_pool_size=123,
                                    maintain_node_health=True,
                                    modify_etc_hosts=True,
                                    prealloc_wipe_disks=True,
                                    reserved_lvs=["/dev/mock_lv"],
                                    use_external_mip_script=True)
    self.ExecOpCode(op)

    self.mcpu.assertLogIsEmpty()
    self.assertEqual(123, self.cluster.candidate_pool_size)
    self.assertEqual(True, self.cluster.maintain_node_health)
    self.assertEqual(True, self.cluster.modify_etc_hosts)
    self.assertEqual(True, self.cluster.prealloc_wipe_disks)
    self.assertEqual(["/dev/mock_lv"], self.cluster.reserved_lvs)
    self.assertEqual(True, self.cluster.use_external_mip_script)
934 |
|
935 |
  def testAddHiddenOs(self):
    """Adding hidden OSes merges with the list; duplicates only warn."""
    self.cluster.hidden_os = ["hidden1", "hidden2"]
    op = opcodes.OpClusterSetParams(hidden_os=[(constants.DDM_ADD, "hidden2"),
                                               (constants.DDM_ADD, "hidden3")])
    self.ExecOpCode(op)

    self.assertEqual(["hidden1", "hidden2", "hidden3"], self.cluster.hidden_os)
    # Adding an already-hidden OS is reported but not fatal
    self.mcpu.assertLogContainsRegex("OS hidden2 already")
943 |
|
944 |
  def testRemoveBlacklistedOs(self):
    """Removing blacklisted OSes; removing an unknown entry only warns."""
    self.cluster.blacklisted_os = ["blisted1", "blisted2"]
    op = opcodes.OpClusterSetParams(blacklisted_os=[
      (constants.DDM_REMOVE, "blisted2"),
      (constants.DDM_REMOVE, "blisted3")])
    self.ExecOpCode(op)

    self.assertEqual(["blisted1"], self.cluster.blacklisted_os)
    self.mcpu.assertLogContainsRegex("OS blisted3 not found")
953 |
|
954 |
def testMasterNetdev(self): |
955 |
master_netdev = "test_dev"
|
956 |
op = opcodes.OpClusterSetParams(master_netdev=master_netdev) |
957 |
self.ExecOpCode(op)
|
958 |
|
959 |
self.assertEqual(master_netdev, self.cluster.master_netdev) |
960 |
|
961 |
  def testMasterNetdevFailNoForce(self):
    """Without force, failing to turn off the master IP aborts the op."""
    master_netdev = "test_dev"
    op = opcodes.OpClusterSetParams(master_netdev=master_netdev)
    self.rpc.call_node_deactivate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)
    self.ExecOpCodeExpectOpExecError(op, "Could not disable the master ip")
968 |
|
969 |
  def testMasterNetdevFailForce(self):
    """With force, failing to turn off the master IP only warns."""
    master_netdev = "test_dev"
    op = opcodes.OpClusterSetParams(master_netdev=master_netdev,
                                    force=True)
    self.rpc.call_node_deactivate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)
    self.ExecOpCode(op)

    self.mcpu.assertLogContainsRegex("Could not disable the master ip")
979 |
|
980 |
|
981 |
class TestLUClusterVerify(CmdlibTestCase):
  """Tests for LUClusterVerify job submission."""

  def testVerifyAllGroups(self):
    # Without a group restriction the LU submits one job per group;
    # this test setup is expected to yield two jobs.
    outcome = self.ExecOpCode(opcodes.OpClusterVerify())

    self.assertEqual(2, len(outcome["jobs"]))

  def testVerifyDefaultGroups(self):
    # Restricting verification to a single named group submits one job
    outcome = self.ExecOpCode(opcodes.OpClusterVerify(group_name="default"))

    self.assertEqual(1, len(outcome["jobs"]))
993 |
|
994 |
|
995 |
class TestLUClusterVerifyConfig(CmdlibTestCase):
  """Tests for LUClusterVerifyConfig.

  Certificate loading/verification and file access are mocked out so
  only the configuration consistency checks themselves are exercised.
  """

  def setUp(self):
    super(TestLUClusterVerifyConfig, self).setUp()

    # Patch out everything touching certificates and the filesystem
    self._load_cert_patcher = testutils \
      .patch_object(OpenSSL.crypto, "load_certificate")
    self._load_cert_mock = self._load_cert_patcher.start()
    self._verify_cert_patcher = testutils \
      .patch_object(utils, "VerifyX509Certificate")
    self._verify_cert_mock = self._verify_cert_patcher.start()
    self._read_file_patcher = testutils.patch_object(utils, "ReadFile")
    self._read_file_mock = self._read_file_patcher.start()
    self._can_read_patcher = testutils.patch_object(utils, "CanRead")
    self._can_read_mock = self._can_read_patcher.start()

    # All mocked checks report success
    self._can_read_mock.return_value = True
    self._read_file_mock.return_value = True
    self._verify_cert_mock.return_value = (None, "")
    self._load_cert_mock.return_value = True

  def tearDown(self):
    super(TestLUClusterVerifyConfig, self).tearDown()

    # Stop the patchers in reverse order of creation
    self._can_read_patcher.stop()
    self._read_file_patcher.stop()
    self._verify_cert_patcher.stop()
    self._load_cert_patcher.stop()

  def testSuccessfulRun(self):
    """A consistent configuration verifies successfully."""
    self.cfg.AddNewInstance()
    op = opcodes.OpClusterVerifyConfig()
    result = self.ExecOpCode(op)

    self.assertTrue(result)

  def testDanglingNode(self):
    """A node belonging to a non-existing group fails verification."""
    node = self.cfg.AddNewNode()
    self.cfg.AddNewInstance(primary_node=node)
    node.group = "invalid"
    op = opcodes.OpClusterVerifyConfig()
    result = self.ExecOpCode(op)

    # Raw string: "\(" is an invalid escape sequence in a regular string
    # literal and triggers a DeprecationWarning on Python >= 3.6
    self.mcpu.assertLogContainsRegex(
      r"following nodes \(and their instances\) belong to a non existing group")
    self.assertFalse(result)

  def testDanglingInstance(self):
    """An instance with a non-existing primary node fails verification."""
    inst = self.cfg.AddNewInstance()
    inst.primary_node = "invalid"
    op = opcodes.OpClusterVerifyConfig()
    result = self.ExecOpCode(op)

    self.mcpu.assertLogContainsRegex(
      "following instances have a non-existing primary-node")
    self.assertFalse(result)
|
1051 |
|
1052 |
|
1053 |
class TestLUClusterVerifyGroup(CmdlibTestCase):
  """End-to-end invocations of LUClusterVerifyGroup."""

  def testEmptyNodeGroup(self):
    """Verifying a freshly created, empty group is skipped."""
    group = self.cfg.AddNewNodeGroup()
    op = opcodes.OpClusterVerifyGroup(group_name=group.name, verbose=True)

    result = self.ExecOpCode(op)

    self.assertTrue(result)
    self.mcpu.assertLogContainsRegex("Empty node group, skipping verification")

  def testSimpleInvocation(self):
    """Plain verification of the default group must not raise."""
    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)

    self.ExecOpCode(op)

  def testSimpleInvocationWithInstance(self):
    """Verification with a diskless instance present must not raise."""
    self.cfg.AddNewInstance(disks=[])
    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)

    self.ExecOpCode(op)

  def testGhostNode(self):
    """Verification copes with a DRBD secondary outside the verified group."""
    group = self.cfg.AddNewNodeGroup()
    # Offline secondary in a different group; master (primary) offline too
    node = self.cfg.AddNewNode(group=group.uuid, offline=True)
    self.master.offline = True
    self.cfg.AddNewInstance(disk_template=constants.DT_DRBD8,
                            primary_node=self.master,
                            secondary_node=node)

    self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
      RpcResultsBuilder() \
        .AddOfflineNode(self.master) \
        .Build()

    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)

    self.ExecOpCode(op)

  def testValidRpcResult(self):
    """Verification handles a successful, empty node_verify RPC result."""
    self.cfg.AddNewInstance(disks=[])

    self.rpc.call_node_verify.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.master, {}) \
        .Build()

    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)

    self.ExecOpCode(op)
|
1102 |
|
1103 |
|
1104 |
class TestLUClusterVerifyGroupMethods(CmdlibTestCase):
  """Base class for testing individual methods in LUClusterVerifyGroup.

  """
  def setUp(self):
    super(TestLUClusterVerifyGroupMethods, self).setUp()
    self.op = opcodes.OpClusterVerifyGroup(group_name="default")

  def PrepareLU(self, lu):
    # Initialize the attributes the LU methods under test rely on,
    # bypassing the normal CheckPrereq/Exec flow
    lu._exclusive_storage = False
    lu.master_node = self.master_uuid
    lu.group_info = self.group
    # NOTE(review): this replaces the attribute on the *class*, not the
    # instance, so the property persists for subsequently created LUs --
    # presumably intentional for these tests; confirm it cannot leak into
    # unrelated test modules.
    cluster.LUClusterVerifyGroup.all_node_info = \
      property(fget=lambda _: self.cfg.GetAllNodesInfo())
1118 |
|
1119 |
|
1120 |
class TestLUClusterVerifyGroupVerifyNode(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNode."""

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    # Non-dict node results are rejected outright
    self.assertFalse(lu._VerifyNode(self.master, None))
    self.assertFalse(lu._VerifyNode(self.master, ""))

  @withLockedLU
  def testInvalidVersion(self, lu):
    self.assertFalse(lu._VerifyNode(self.master, {"version": None}))
    self.assertFalse(lu._VerifyNode(self.master, {"version": ""}))
    # A protocol version mismatch is a hard failure
    self.assertFalse(lu._VerifyNode(self.master, {
      "version": (constants.PROTOCOL_VERSION - 1, constants.RELEASE_VERSION)
    }))

    self.mcpu.ClearLogMessages()
    # A differing release version is only a warning, not a failure
    self.assertTrue(lu._VerifyNode(self.master, {
      "version": (constants.PROTOCOL_VERSION, constants.RELEASE_VERSION + "x")
    }))
    self.mcpu.assertLogContainsRegex("software version mismatch")

  def _GetValidNodeResult(self, additional_fields):
    # Build a minimal node result that passes _VerifyNode, extended with
    # the caller-supplied fields
    ret = {
      "version": (constants.PROTOCOL_VERSION, constants.RELEASE_VERSION),
      constants.NV_NODESETUP: []
    }
    ret.update(additional_fields)
    return ret

  @withLockedLU
  def testHypervisor(self, lu):
    # A hypervisor reporting an error string must be logged
    lu._VerifyNode(self.master, self._GetValidNodeResult({
      constants.NV_HYPERVISOR: {
        constants.HT_XEN_PVM: None,
        constants.HT_XEN_HVM: "mock error"
      }
    }))
    self.mcpu.assertLogContainsRegex(constants.HT_XEN_HVM)
    self.mcpu.assertLogContainsRegex("mock error")

  @withLockedLU
  def testHvParams(self, lu):
    # Hypervisor parameter validation failures must be logged
    lu._VerifyNode(self.master, self._GetValidNodeResult({
      constants.NV_HVPARAMS: [("mock item", constants.HT_XEN_HVM, "mock error")]
    }))
    self.mcpu.assertLogContainsRegex(constants.HT_XEN_HVM)
    self.mcpu.assertLogContainsRegex("mock item")
    self.mcpu.assertLogContainsRegex("mock error")

  @withLockedLU
  def testSuccessfulResult(self, lu):
    self.assertTrue(lu._VerifyNode(self.master, self._GetValidNodeResult({})))
    self.mcpu.assertLogIsEmpty()
|
1172 |
|
1173 |
|
1174 |
class TestLUClusterVerifyGroupVerifyNodeTime(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeTime."""

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    # Missing or malformed NV_TIME entries must be reported
    for ndata in [{}, {constants.NV_TIME: "invalid"}]:
      self.mcpu.ClearLogMessages()
      lu._VerifyNodeTime(self.master, ndata, None, None)

      self.mcpu.assertLogContainsRegex("Node returned invalid time")

  @withLockedLU
  def testNodeDiverges(self, lu):
    # Node clock far outside the [1000, 1005] window in either direction
    for ntime in [(0, 0), (2000, 0)]:
      self.mcpu.ClearLogMessages()
      lu._VerifyNodeTime(self.master, {constants.NV_TIME: ntime}, 1000, 1005)

      self.mcpu.assertLogContainsRegex("Node time diverges")

  @withLockedLU
  def testSuccessfulResult(self, lu):
    lu._VerifyNodeTime(self.master, {constants.NV_TIME: (0, 0)}, 0, 5)
    self.mcpu.assertLogIsEmpty()
|
1195 |
|
1196 |
|
1197 |
class TestLUClusterVerifyGroupUpdateVerifyNodeLVM(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._UpdateVerifyNodeLVM."""

  def setUp(self):
    super(TestLUClusterVerifyGroupUpdateVerifyNodeLVM, self).setUp()
    # A node result with one VG and one matching PV, as the LU expects it
    self.VALID_NRESULT = {
      constants.NV_VGLIST: {"mock_vg": 30000},
      constants.NV_PVLIST: [
        {
          "name": "mock_pv",
          "vg_name": "mock_vg",
          "size": 5000,
          "free": 2500,
          "attributes": [],
          "lv_list": []
        }
      ]
    }

  @withLockedLU
  def testNoVgName(self, lu):
    # Without a configured VG there is nothing to check
    lu._UpdateVerifyNodeLVM(self.master, {}, None, None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testEmptyNodeResult(self, lu):
    lu._UpdateVerifyNodeLVM(self.master, {}, "mock_vg", None)
    self.mcpu.assertLogContainsRegex("unable to check volume groups")
    self.mcpu.assertLogContainsRegex("Can't get PV list from node")

  @withLockedLU
  def testValidNodeResult(self, lu):
    lu._UpdateVerifyNodeLVM(self.master, self.VALID_NRESULT, "mock_vg", None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testValidNodeResultExclusiveStorage(self, lu):
    lu._exclusive_storage = True
    lu._UpdateVerifyNodeLVM(self.master, self.VALID_NRESULT, "mock_vg",
                            cluster.LUClusterVerifyGroup.NodeImage())
    self.mcpu.assertLogIsEmpty()
|
1237 |
|
1238 |
|
1239 |
class TestLUClusterVerifyGroupVerifyGroupDRBDVersion(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyGroupDRBDVersion."""

  @withLockedLU
  def testEmptyNodeResult(self, lu):
    lu._VerifyGroupDRBDVersion({})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testValidNodeResult(self, lu):
    # A single node can never mismatch with itself
    lu._VerifyGroupDRBDVersion(
      RpcResultsBuilder()
        .AddSuccessfulNode(self.master, {
          constants.NV_DRBDVERSION: "8.3.0"
        })
        .Build())
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testDifferentVersions(self, lu):
    # Two nodes with different DRBD versions: both get reported
    node1 = self.cfg.AddNewNode()
    lu._VerifyGroupDRBDVersion(
      RpcResultsBuilder()
        .AddSuccessfulNode(self.master, {
          constants.NV_DRBDVERSION: "8.3.0"
        })
        .AddSuccessfulNode(node1, {
          constants.NV_DRBDVERSION: "8.4.0"
        })
        .Build())
    self.mcpu.assertLogContainsRegex("DRBD version mismatch: 8.3.0")
    self.mcpu.assertLogContainsRegex("DRBD version mismatch: 8.4.0")
1270 |
|
1271 |
|
1272 |
class TestLUClusterVerifyGroupVerifyGroupLVM(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyGroupLVM."""

  @withLockedLU
  def testNoVgName(self, lu):
    lu._VerifyGroupLVM(None, None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testNoExclusiveStorage(self, lu):
    # Without exclusive storage the PV size check is skipped
    lu._VerifyGroupLVM(None, "mock_vg")
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testNoPvInfo(self, lu):
    lu._exclusive_storage = True
    # NodeImage without PV information must not trigger warnings
    nimg = cluster.LUClusterVerifyGroup.NodeImage()
    lu._VerifyGroupLVM({self.master.uuid: nimg}, "mock_vg")
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testValidPvInfos(self, lu):
    lu._exclusive_storage = True
    node2 = self.cfg.AddNewNode()
    # Two nodes with overlapping/close PV size ranges: no complaints
    nimg1 = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master.uuid)
    nimg1.pv_min = 10000
    nimg1.pv_max = 10010
    nimg2 = cluster.LUClusterVerifyGroup.NodeImage(uuid=node2.uuid)
    nimg2.pv_min = 9998
    nimg2.pv_max = 10005
    lu._VerifyGroupLVM({self.master.uuid: nimg1, node2.uuid: nimg2}, "mock_vg")
    self.mcpu.assertLogIsEmpty()
|
1302 |
|
1303 |
|
1304 |
class TestLUClusterVerifyGroupVerifyNodeBridges(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeBridges."""

  @withLockedLU
  def testNoBridges(self, lu):
    # No bridges requested: nothing to verify
    lu._VerifyNodeBridges(None, None, None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testInvalidBridges(self, lu):
    # Missing or malformed NV_BRIDGES data must be reported
    for ndata in [{}, {constants.NV_BRIDGES: ""}]:
      self.mcpu.ClearLogMessages()
      lu._VerifyNodeBridges(self.master, ndata, ["mock_bridge"])
      self.mcpu.assertLogContainsRegex("not return valid bridge information")

    self.mcpu.ClearLogMessages()
    # The node reporting a bridge as missing must be logged as well
    lu._VerifyNodeBridges(self.master, {constants.NV_BRIDGES: ["mock_bridge"]},
                          ["mock_bridge"])
    self.mcpu.assertLogContainsRegex("missing bridge")
1322 |
|
1323 |
|
1324 |
class TestLUClusterVerifyGroupVerifyNodeUserScripts(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeUserScripts."""

  @withLockedLU
  def testNoUserScripts(self, lu):
    lu._VerifyNodeUserScripts(self.master, {})
    self.mcpu.assertLogContainsRegex("did not return user scripts information")

  @withLockedLU
  def testBrokenUserScripts(self, lu):
    lu._VerifyNodeUserScripts(self.master,
                              {constants.NV_USERSCRIPTS: ["script"]})
    self.mcpu.assertLogContainsRegex("scripts not present or not executable")
1336 |
|
1337 |
|
1338 |
class TestLUClusterVerifyGroupVerifyNodeNetwork(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeNetwork."""

  def setUp(self):
    super(TestLUClusterVerifyGroupVerifyNodeNetwork, self).setUp()
    # A node result with all network checks passing; individual tests
    # overwrite single entries to inject failures
    self.VALID_NRESULT = {
      constants.NV_NODELIST: {},
      constants.NV_NODENETTEST: {},
      constants.NV_MASTERIP: True
    }

  @withLockedLU
  def testEmptyNodeResult(self, lu):
    lu._VerifyNodeNetwork(self.master, {})
    self.mcpu.assertLogContainsRegex(
      "node hasn't returned node ssh connectivity data")
    self.mcpu.assertLogContainsRegex(
      "node hasn't returned node tcp connectivity data")
    self.mcpu.assertLogContainsRegex(
      "node hasn't returned node master IP reachability data")

  @withLockedLU
  def testValidResult(self, lu):
    lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testSshProblem(self, lu):
    self.VALID_NRESULT.update({
      constants.NV_NODELIST: {
        "mock_node": "mock_error"
      }
    })
    lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
    self.mcpu.assertLogContainsRegex("ssh communication with node 'mock_node'")

  @withLockedLU
  def testTcpProblem(self, lu):
    self.VALID_NRESULT.update({
      constants.NV_NODENETTEST: {
        "mock_node": "mock_error"
      }
    })
    lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
    self.mcpu.assertLogContainsRegex("tcp communication with node 'mock_node'")

  @withLockedLU
  def testMasterIpNotReachable(self, lu):
    self.VALID_NRESULT.update({
      constants.NV_MASTERIP: False
    })
    node1 = self.cfg.AddNewNode()
    # The message differs depending on whether the failing node is the
    # master itself or another node
    lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
    self.mcpu.assertLogContainsRegex(
      "the master node cannot reach the master IP")

    self.mcpu.ClearLogMessages()
    lu._VerifyNodeNetwork(node1, self.VALID_NRESULT)
    self.mcpu.assertLogContainsRegex("cannot reach the master IP")
1397 |
|
1398 |
|
1399 |
class TestLUClusterVerifyGroupVerifyInstance(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyInstance.

  The fixture builds three instances (DRBD-based, running plain, and
  diskless) plus matching NodeImage objects describing which volumes and
  instances each node is expected to host.
  """

  def setUp(self):
    super(TestLUClusterVerifyGroupVerifyInstance, self).setUp()

    self.node1 = self.cfg.AddNewNode()
    self.drbd_inst = self.cfg.AddNewInstance(
      disks=[self.cfg.CreateDisk(dev_type=constants.DT_DRBD8,
                                 primary_node=self.master,
                                 secondary_node=self.node1)])
    self.running_inst = self.cfg.AddNewInstance(
      admin_state=constants.ADMINST_UP, disks_active=True)
    self.diskless_inst = self.cfg.AddNewInstance(disks=[])

    # The master image carries the volumes of the running and diskless
    # instances plus the DRBD children (the underlying LVs)
    self.master_img = \
      cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    self.master_img.volumes = ["/".join(disk.logical_id)
                               for inst in [self.running_inst,
                                            self.diskless_inst]
                               for disk in inst.disks]
    self.master_img.volumes.extend(
      ["/".join(disk.logical_id) for disk in self.drbd_inst.disks[0].children])
    self.master_img.instances = [self.running_inst.uuid]
    # node1 only holds the secondary copy of the DRBD disk
    self.node1_img = \
      cluster.LUClusterVerifyGroup.NodeImage(uuid=self.node1.uuid)
    self.node1_img.volumes = \
      ["/".join(disk.logical_id) for disk in self.drbd_inst.disks[0].children]
    self.node_imgs = {
      self.master_uuid: self.master_img,
      self.node1.uuid: self.node1_img
    }
    # All disks of the running instance report a healthy status
    self.diskstatus = {
      self.master_uuid: [
        (True, objects.BlockDevStatus(ldisk_status=constants.LDS_OKAY))
        for _ in self.running_inst.disks
      ]
    }

  @withLockedLU
  def testDisklessInst(self, lu):
    lu._VerifyInstance(self.diskless_inst, self.node_imgs, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testOfflineNode(self, lu):
    # A stopped instance on an offline node is not an error
    self.master_img.offline = True
    lu._VerifyInstance(self.drbd_inst, self.node_imgs, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testRunningOnOfflineNode(self, lu):
    self.master_img.offline = True
    lu._VerifyInstance(self.running_inst, self.node_imgs, {})
    self.mcpu.assertLogContainsRegex(
      "instance is marked as running and lives on offline node")

  @withLockedLU
  def testMissingVolume(self, lu):
    self.master_img.volumes = []
    lu._VerifyInstance(self.running_inst, self.node_imgs, {})
    self.mcpu.assertLogContainsRegex("volume .* missing")

  @withLockedLU
  def testRunningInstanceOnWrongNode(self, lu):
    self.master_img.instances = []
    self.diskless_inst.admin_state = constants.ADMINST_UP
    lu._VerifyInstance(self.running_inst, self.node_imgs, {})
    self.mcpu.assertLogContainsRegex("instance not running on its primary node")

  @withLockedLU
  def testRunningInstanceOnRightNode(self, lu):
    self.master_img.instances = [self.running_inst.uuid]
    lu._VerifyInstance(self.running_inst, self.node_imgs, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testValidDiskStatus(self, lu):
    lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testDegradedDiskStatus(self, lu):
    self.diskstatus[self.master_uuid][0][1].is_degraded = True
    lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex("instance .* is degraded")

  @withLockedLU
  def testNotOkayDiskStatus(self, lu):
    self.diskstatus[self.master_uuid][0][1].ldisk_status = constants.LDS_FAULTY
    lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex("instance .* state is 'faulty'")

  @withLockedLU
  def testExclusiveStorageWithInvalidInstance(self, lu):
    # DRBD is not supported together with exclusive storage
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    lu._VerifyInstance(self.drbd_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex(
      "instance has template drbd, which is not supported")

  @withLockedLU
  def testExclusiveStorageWithValidInstance(self, lu):
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    # With spindles set, a plain instance is fine under exclusive storage
    self.running_inst.disks[0].spindles = 1
    lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testDrbdInTwoGroups(self, lu):
    group = self.cfg.AddNewNodeGroup()
    self.node1.group = group.uuid
    lu._VerifyInstance(self.drbd_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex(
      "instance has primary and secondary nodes in different groups")

  @withLockedLU
  def testOfflineSecondary(self, lu):
    self.node1_img.offline = True
    lu._VerifyInstance(self.drbd_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex("instance has offline secondary node\(s\)")
1517 |
|
1518 |
|
1519 |
class TestLUClusterVerifyGroupVerifyOrphanVolumes(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyOrphanVolumes."""

  @withLockedLU
  def testOrphanedVolume(self, lu):
    master_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    master_img.volumes = ["mock_vg/disk_0", "mock_vg/disk_1", "mock_vg/disk_2"]
    node_imgs = {
      self.master_uuid: master_img
    }
    # Only disk_0 is expected; disk_2 is explicitly marked as reserved,
    # so only disk_1 may be reported as unknown
    node_vol_should = {
      self.master_uuid: ["mock_vg/disk_0"]
    }

    lu._VerifyOrphanVolumes(node_vol_should, node_imgs,
                            utils.FieldSet("mock_vg/disk_2"))
    self.mcpu.assertLogContainsRegex("volume mock_vg/disk_1 is unknown")
    self.mcpu.assertLogDoesNotContainRegex("volume mock_vg/disk_0 is unknown")
    self.mcpu.assertLogDoesNotContainRegex("volume mock_vg/disk_2 is unknown")
1537 |
|
1538 |
|
1539 |
class TestLUClusterVerifyGroupVerifyNPlusOneMemory(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNPlusOneMemory."""

  @withLockedLU
  def testN1Failure(self, lu):
    group1 = self.cfg.AddNewNodeGroup()

    node1 = self.cfg.AddNewNode()
    node2 = self.cfg.AddNewNode(group=group1)
    node3 = self.cfg.AddNewNode()

    inst1 = self.cfg.AddNewInstance()
    inst2 = self.cfg.AddNewInstance()
    inst3 = self.cfg.AddNewInstance()

    # node1 would have to take over three instances from the master but
    # has no free memory (mfree unset)
    node1_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=node1.uuid)
    node1_img.sbp = {
      self.master_uuid: [inst1.uuid, inst2.uuid, inst3.uuid]
    }

    node2_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=node2.uuid)

    # Offline nodes are skipped by the N+1 check
    node3_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=node3.uuid)
    node3_img.offline = True

    node_imgs = {
      node1.uuid: node1_img,
      node2.uuid: node2_img,
      node3.uuid: node3_img
    }

    lu._VerifyNPlusOneMemory(node_imgs, self.cfg.GetAllInstancesInfo())
    # "accomodate" (sic) matches the message text in the production code
    self.mcpu.assertLogContainsRegex(
      "not enough memory to accomodate instance failovers")

    # With enough free memory the same check passes
    self.mcpu.ClearLogMessages()
    node1_img.mfree = 1000
    lu._VerifyNPlusOneMemory(node_imgs, self.cfg.GetAllInstancesInfo())
    self.mcpu.assertLogIsEmpty()
|
1577 |
|
1578 |
|
1579 |
class TestLUClusterVerifyGroupVerifyFiles(TestLUClusterVerifyGroupMethods):
  """Tests for the cross-node file checksum verification."""

  @withLockedLU
  def test(self, lu):
    # Nodes with differing roles: plain vm-capable nodes, a non-vm-capable
    # master candidate, and an offline node.
    node1 = self.cfg.AddNewNode(master_candidate=False, offline=False,
                                vm_capable=True)
    node2 = self.cfg.AddNewNode(master_candidate=True, vm_capable=False)
    node3 = self.cfg.AddNewNode(master_candidate=False, offline=False,
                                vm_capable=True)
    node4 = self.cfg.AddNewNode(master_candidate=False, offline=False,
                                vm_capable=True)
    node5 = self.cfg.AddNewNode(master_candidate=False, offline=True)

    nodeinfo = [self.master, node1, node2, node3, node4, node5]

    # Expected distribution: everywhere / optional / master candidates only /
    # vm-capable nodes only.
    files_all = {
      pathutils.CLUSTER_DOMAIN_SECRET_FILE,
      pathutils.RAPI_CERT_FILE,
      pathutils.RAPI_USERS_FILE,
      }
    files_opt = {
      pathutils.RAPI_USERS_FILE,
      hv_xen.XL_CONFIG_FILE,
      pathutils.VNC_PASSWORD_FILE,
      }
    files_mc = {
      pathutils.CLUSTER_CONF_FILE,
      }
    files_vm = {
      hv_xen.XEND_CONFIG_FILE,
      hv_xen.XL_CONFIG_FILE,
      pathutils.VNC_PASSWORD_FILE,
      }

    # Per-node checksum reports, deliberately inconsistent to trigger each
    # kind of error message.
    nvinfo = (RpcResultsBuilder()
              .AddSuccessfulNode(self.master, {
                constants.NV_FILELIST: {
                  pathutils.CLUSTER_CONF_FILE:
                    "82314f897f38b35f9dab2f7c6b1593e0",
                  pathutils.RAPI_CERT_FILE:
                    "babbce8f387bc082228e544a2146fee4",
                  pathutils.CLUSTER_DOMAIN_SECRET_FILE:
                    "cds-47b5b3f19202936bb4",
                  hv_xen.XEND_CONFIG_FILE:
                    "b4a8a824ab3cac3d88839a9adeadf310",
                  hv_xen.XL_CONFIG_FILE:
                    "77935cee92afd26d162f9e525e3d49b9",
                  }})
              .AddSuccessfulNode(node1, {
                constants.NV_FILELIST: {
                  pathutils.RAPI_CERT_FILE:
                    "97f0356500e866387f4b84233848cc4a",
                  hv_xen.XEND_CONFIG_FILE:
                    "b4a8a824ab3cac3d88839a9adeadf310",
                  }})
              .AddSuccessfulNode(node2, {
                constants.NV_FILELIST: {
                  pathutils.RAPI_CERT_FILE:
                    "97f0356500e866387f4b84233848cc4a",
                  pathutils.CLUSTER_DOMAIN_SECRET_FILE:
                    "cds-47b5b3f19202936bb4",
                  }})
              .AddSuccessfulNode(node3, {
                constants.NV_FILELIST: {
                  pathutils.RAPI_CERT_FILE:
                    "97f0356500e866387f4b84233848cc4a",
                  pathutils.CLUSTER_CONF_FILE:
                    "conf-a6d4b13e407867f7a7b4f0f232a8f527",
                  pathutils.CLUSTER_DOMAIN_SECRET_FILE:
                    "cds-47b5b3f19202936bb4",
                  pathutils.RAPI_USERS_FILE:
                    "rapiusers-ea3271e8d810ef3",
                  hv_xen.XL_CONFIG_FILE:
                    "77935cee92afd26d162f9e525e3d49b9",
                  }})
              .AddSuccessfulNode(node4, {})
              .AddOfflineNode(node5)
              .Build())
    assert set(nvinfo.keys()) == set(map(operator.attrgetter("uuid"), nodeinfo))

    lu._VerifyFiles(nodeinfo, self.master_uuid, nvinfo,
                    (files_all, files_opt, files_mc, files_vm))

    expected_msgs = [
      "File %s found with 2 different checksums (variant 1 on"
      " %s, %s, %s; variant 2 on %s)" %
      (pathutils.RAPI_CERT_FILE, node1.name, node2.name, node3.name,
       self.master.name),
      "File %s is missing from node(s) %s" %
      (pathutils.CLUSTER_DOMAIN_SECRET_FILE, node1.name),
      "File %s should not exist on node(s) %s" %
      (pathutils.CLUSTER_CONF_FILE, node3.name),
      "File %s is missing from node(s) %s" %
      (hv_xen.XEND_CONFIG_FILE, node3.name),
      "File %s is missing from node(s) %s" %
      (pathutils.CLUSTER_CONF_FILE, node2.name),
      "File %s found with 2 different checksums (variant 1 on"
      " %s; variant 2 on %s)" %
      (pathutils.CLUSTER_CONF_FILE, self.master.name, node3.name),
      "File %s is optional, but it must exist on all or no nodes (not"
      " found on %s, %s, %s)" %
      (pathutils.RAPI_USERS_FILE, self.master.name, node1.name, node2.name),
      "File %s is optional, but it must exist on all or no nodes (not"
      " found on %s)" % (hv_xen.XL_CONFIG_FILE, node1.name),
      "Node did not return file checksum data",
      ]

    # Exactly these messages, no more and no fewer
    self.assertEqual(len(self.mcpu.GetLogMessages()), len(expected_msgs))
    for expected_msg in expected_msgs:
      self.mcpu.assertLogContainsInLine(expected_msg)
1677 |
class TestLUClusterVerifyGroupVerifyNodeDrbd(TestLUClusterVerifyGroupMethods):
  """Tests for the per-node DRBD minor/helper checks of cluster-verify."""

  def setUp(self):
    super(TestLUClusterVerifyGroupVerifyNodeDrbd, self).setUp()

    # One administratively-up DRBD instance spanning two fresh nodes, so
    # its minors are expected to be active on both of them.
    self.node1 = self.cfg.AddNewNode()
    self.node2 = self.cfg.AddNewNode()
    self.inst = self.cfg.AddNewInstance(
      disks=[self.cfg.CreateDisk(dev_type=constants.DT_DRBD8,
                                 primary_node=self.node1,
                                 secondary_node=self.node2)],
      admin_state=constants.ADMINST_UP)

  @withLockedLU
  def testNoDrbdHelper(self, lu):
    # No configured helper: helper checks are skipped entirely.
    lu._VerifyNodeDrbd(self.master, {}, self.cfg.GetAllInstancesInfo(), None,
                       self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testDrbdHelperInvalidNodeResult(self, lu):
    # (node data, expected error) pairs covering the bad-helper variants
    cases = [
      ({}, "no drbd usermode helper returned"),
      ({constants.NV_DRBDHELPER: (False, "")},
       "drbd usermode helper check unsuccessful"),
      ({constants.NV_DRBDHELPER: (True, "/bin/false")},
       "wrong drbd usermode helper"),
      ]
    for ndata, expected in cases:
      self.mcpu.ClearLogMessages()
      lu._VerifyNodeDrbd(self.master, ndata, self.cfg.GetAllInstancesInfo(),
                         "/bin/true", self.cfg.ComputeDRBDMap())
      self.mcpu.assertLogContainsRegex(expected)

  @withLockedLU
  def testNoNodeResult(self, lu):
    lu._VerifyNodeDrbd(self.node1, {}, self.cfg.GetAllInstancesInfo(),
                       None, self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogContainsRegex("drbd minor 1 of .* is not active")

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    lu._VerifyNodeDrbd(self.node1, {constants.NV_DRBDLIST: ""},
                       self.cfg.GetAllInstancesInfo(), None,
                       self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogContainsRegex("cannot parse drbd status file")

  @withLockedLU
  def testWrongMinorInUse(self, lu):
    # Minor 2 is active but unallocated, while the expected minor 1 is not.
    lu._VerifyNodeDrbd(self.node1, {constants.NV_DRBDLIST: [2]},
                       self.cfg.GetAllInstancesInfo(), None,
                       self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogContainsRegex("drbd minor 1 of .* is not active")
    self.mcpu.assertLogContainsRegex("unallocated drbd minor 2 is in use")

  @withLockedLU
  def testValidResult(self, lu):
    lu._VerifyNodeDrbd(self.node1, {constants.NV_DRBDLIST: [1]},
                       self.cfg.GetAllInstancesInfo(), None,
                       self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogIsEmpty()
1736 |
class TestLUClusterVerifyGroupVerifyNodeOs(TestLUClusterVerifyGroupMethods):
  """Tests for OS list collection and cross-node OS comparison."""

  @withLockedLU
  def testUpdateNodeOsInvalidNodeResult(self, lu):
    # Each of these malformed results must be rejected
    invalid_results = [
      {},
      {constants.NV_OSLIST: ""},
      {constants.NV_OSLIST: [""]},
      {constants.NV_OSLIST: [["1", "2"]]},
      ]
    for ndata in invalid_results:
      self.mcpu.ClearLogMessages()
      nimage = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
      lu._UpdateNodeOS(self.master, ndata, nimage)
      self.mcpu.assertLogContainsRegex("node hasn't returned valid OS data")

  @withLockedLU
  def testUpdateNodeOsValidNodeResult(self, lu):
    ndata = {
      constants.NV_OSLIST: [
        ["mock_OS", "/mocked/path", True, "", ["default"], [],
         [constants.OS_API_V20]],
        ["Another_Mock", "/random", True, "", ["var1", "var2"],
         [{"param1": "val1"}, {"param2": "val2"}], constants.OS_API_VERSIONS],
        ],
      }
    nimage = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    lu._UpdateNodeOS(self.master, ndata, nimage)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testVerifyNodeOs(self, lu):
    node = self.cfg.AddNewNode()
    nimg_root = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=node.uuid)

    # Reference node OS list; entries are
    # (path, status, diagnose, variants, parameters, api_versions)
    nimg_root.os_fail = False
    nimg_root.oslist = {
      "mock_os": [("/mocked/path", True, "", {"default"}, set(),
                   {constants.OS_API_V20})],
      "broken_base_os": [("/broken", False, "", set(), set(),
                         {constants.OS_API_V20})],
      "only_on_root": [("/random", True, "", set(), set(), set())],
      "diffing_os": [("/pinky", True, "", {"var1", "var2"},
                      {("param1", "val1"), ("param2", "val2")},
                      {constants.OS_API_V20})],
      }
    # The checked node disagrees with the reference in several ways
    nimg.os_fail = False
    nimg.oslist = {
      "mock_os": [("/mocked/path", True, "", {"default"}, set(),
                   {constants.OS_API_V20})],
      "only_on_test": [("/random", True, "", set(), set(), set())],
      "diffing_os": [("/bunny", True, "", {"var1", "var3"},
                      {("param1", "val1"), ("param3", "val3")},
                      {constants.OS_API_V15})],
      "broken_os": [("/broken", False, "", set(), set(),
                     {constants.OS_API_V20})],
      "multi_entries": [
        ("/multi1", True, "", set(), set(), {constants.OS_API_V20}),
        ("/multi2", True, "", set(), set(), {constants.OS_API_V20})],
      }

    lu._VerifyNodeOS(node, nimg, nimg_root)

    expected_msgs = [
      "Extra OS only_on_test not present on reference node",
      "OSes present on reference node .* but missing on this node:" +
      " only_on_root",
      "OS API version for diffing_os differs",
      "OS variants list for diffing_os differs",
      "OS parameters for diffing_os differs",
      "Invalid OS broken_os",
      "Extra OS broken_os not present on reference node",
      "OS 'multi_entries' has multiple entries",
      "Extra OS multi_entries not present on reference node",
      ]

    self.assertEqual(len(expected_msgs), len(self.mcpu.GetLogMessages()))
    for expected_msg in expected_msgs:
      self.mcpu.assertLogContainsRegex(expected_msg)
1812 |
class TestLUClusterVerifyGroupVerifyAcceptedFileStoragePaths(
    TestLUClusterVerifyGroupMethods):
  """Tests for the forbidden file storage path check of cluster-verify."""

  @withLockedLU
  def testNotMaster(self, lu):
    # Non-master nodes are not expected to report storage paths at all
    lu._VerifyAcceptedFileStoragePaths(self.master, {}, False)
    self.mcpu.assertLogIsEmpty()

  # Renamed from "testNotMasterButRetunedValue" to fix the typo; unittest
  # discovers test methods by the "test" prefix, so behavior is unchanged.
  @withLockedLU
  def testNotMasterButReturnedValue(self, lu):
    # A non-master node returning path data is itself an error
    lu._VerifyAcceptedFileStoragePaths(
      self.master, {constants.NV_ACCEPTED_STORAGE_PATHS: []}, False)
    self.mcpu.assertLogContainsRegex(
      "Node should not have returned forbidden file storage paths")

  @withLockedLU
  def testMasterInvalidNodeResult(self, lu):
    # The master must report path data; an empty result is an error
    lu._VerifyAcceptedFileStoragePaths(self.master, {}, True)
    self.mcpu.assertLogContainsRegex(
      "Node did not return forbidden file storage paths")

  @withLockedLU
  def testMasterForbiddenPaths(self, lu):
    lu._VerifyAcceptedFileStoragePaths(
      self.master, {constants.NV_ACCEPTED_STORAGE_PATHS: ["/forbidden"]}, True)
    self.mcpu.assertLogContainsRegex("Found forbidden file storage paths")

  @withLockedLU
  def testMasterSuccess(self, lu):
    lu._VerifyAcceptedFileStoragePaths(
      self.master, {constants.NV_ACCEPTED_STORAGE_PATHS: []}, True)
    self.mcpu.assertLogIsEmpty()
1845 |
class TestLUClusterVerifyGroupVerifyStoragePaths(
    TestLUClusterVerifyGroupMethods):
  """Tests for the file and shared-file storage path verification."""

  @withLockedLU
  def testVerifyFileStoragePathsSuccess(self, lu):
    # No error reported by the node means nothing to log
    lu._VerifyFileStoragePaths(self.master, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testVerifyFileStoragePathsFailure(self, lu):
    ndata = {constants.NV_FILE_STORAGE_PATH: "/fail/path"}
    lu._VerifyFileStoragePaths(self.master, ndata)
    self.mcpu.assertLogContainsRegex(
      "The configured file storage path is unusable")

  @withLockedLU
  def testVerifySharedFileStoragePathsSuccess(self, lu):
    lu._VerifySharedFileStoragePaths(self.master, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testVerifySharedFileStoragePathsFailure(self, lu):
    ndata = {constants.NV_SHARED_FILE_STORAGE_PATH: "/fail/path"}
    lu._VerifySharedFileStoragePaths(self.master, ndata)
    self.mcpu.assertLogContainsRegex(
      "The configured sharedfile storage path is unusable")
1872 |
class TestLUClusterVerifyGroupVerifyOob(TestLUClusterVerifyGroupMethods):
  """Tests for the out-of-band program path checks."""

  @withLockedLU
  def testEmptyResult(self, lu):
    lu._VerifyOob(self.master, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testErrorResults(self, lu):
    # Each path reported as problematic must appear in the log
    lu._VerifyOob(self.master, {constants.NV_OOB_PATHS: ["path1", "path2"]})
    self.mcpu.assertLogContainsRegex("path1")
    self.mcpu.assertLogContainsRegex("path2")
1885 |
class TestLUClusterVerifyGroupUpdateNodeVolumes(
    TestLUClusterVerifyGroupMethods):
  """Tests for collecting the per-node LVM volume list."""

  def setUp(self):
    super(TestLUClusterVerifyGroupUpdateNodeVolumes, self).setUp()
    self.nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)

  @withLockedLU
  def testNoVgName(self, lu):
    # No volume group configured: nothing is logged, but LVM data is
    # flagged as unavailable on the node image.
    lu._UpdateNodeVolumes(self.master, {}, self.nimg, None)
    self.mcpu.assertLogIsEmpty()
    self.assertTrue(self.nimg.lvm_fail)

  @withLockedLU
  def testErrorMessage(self, lu):
    # A string result is an error message from the node
    lu._UpdateNodeVolumes(self.master, {constants.NV_LVLIST: "mock error"},
                          self.nimg, "mock_vg")
    self.mcpu.assertLogContainsRegex("LVM problem on node: mock error")
    self.assertTrue(self.nimg.lvm_fail)

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    # Anything that is neither a string nor a dict is a broken RPC result
    lu._UpdateNodeVolumes(self.master, {constants.NV_LVLIST: [1, 2, 3]},
                          self.nimg, "mock_vg")
    self.mcpu.assertLogContainsRegex("rpc call to node failed")
    self.assertTrue(self.nimg.lvm_fail)

  @withLockedLU
  def testValidNodeResult(self, lu):
    lu._UpdateNodeVolumes(self.master, {constants.NV_LVLIST: {}},
                          self.nimg, "mock_vg")
    self.mcpu.assertLogIsEmpty()
    self.assertFalse(self.nimg.lvm_fail)
1919 |
class TestLUClusterVerifyGroupUpdateNodeInstances(
    TestLUClusterVerifyGroupMethods):
  """Tests for collecting the per-node instance list."""

  def setUp(self):
    super(TestLUClusterVerifyGroupUpdateNodeInstances, self).setUp()
    self.nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    lu._UpdateNodeInstances(self.master, {}, self.nimg)
    self.mcpu.assertLogContainsRegex("rpc call to node failed")

  @withLockedLU
  def testValidNodeResult(self, lu):
    inst = self.cfg.AddNewInstance()
    ndata = {constants.NV_INSTANCELIST: [inst.name]}
    lu._UpdateNodeInstances(self.master, ndata, self.nimg)
    self.mcpu.assertLogIsEmpty()
1939 |
class TestLUClusterVerifyGroupUpdateNodeInfo(TestLUClusterVerifyGroupMethods):
  """Tests for collecting hypervisor and volume group info per node."""

  def setUp(self):
    super(TestLUClusterVerifyGroupUpdateNodeInfo, self).setUp()
    self.nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    self.valid_hvresult = {constants.NV_HVINFO: {"memory_free": 1024}}

  @withLockedLU
  def testInvalidHvNodeResult(self, lu):
    # Missing or non-dict hypervisor info counts as a failed RPC
    for ndata in ({}, {constants.NV_HVINFO: ""}):
      self.mcpu.ClearLogMessages()
      lu._UpdateNodeInfo(self.master, ndata, self.nimg, None)
      self.mcpu.assertLogContainsRegex("rpc call to node failed")

  @withLockedLU
  def testInvalidMemoryFreeHvNodeResult(self, lu):
    ndata = {constants.NV_HVINFO: {"memory_free": "abc"}}
    lu._UpdateNodeInfo(self.master, ndata, self.nimg, None)
    self.mcpu.assertLogContainsRegex(
      "node returned invalid nodeinfo, check hypervisor")

  @withLockedLU
  def testValidHvNodeResult(self, lu):
    lu._UpdateNodeInfo(self.master, self.valid_hvresult, self.nimg, None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testInvalidVgNodeResult(self, lu):
    # An empty list or a plain string both mean "no data for the VG"
    for vgdata in ([], ""):
      self.mcpu.ClearLogMessages()
      ndata = dict(self.valid_hvresult)
      ndata[constants.NV_VGLIST] = vgdata
      lu._UpdateNodeInfo(self.master, ndata, self.nimg, "mock_vg")
      self.mcpu.assertLogContainsRegex(
        "node didn't return data for the volume group 'mock_vg'")

  @withLockedLU
  def testInvalidDiskFreeVgNodeResult(self, lu):
    # Non-numeric free space is invalid LVM info
    self.valid_hvresult[constants.NV_VGLIST] = {"mock_vg": "abc"}
    lu._UpdateNodeInfo(self.master, self.valid_hvresult, self.nimg, "mock_vg")
    self.mcpu.assertLogContainsRegex(
      "node returned invalid LVM info, check LVM status")

  @withLockedLU
  def testValidVgNodeResult(self, lu):
    self.valid_hvresult[constants.NV_VGLIST] = {"mock_vg": 10000}
    lu._UpdateNodeInfo(self.master, self.valid_hvresult, self.nimg, "mock_vg")
    self.mcpu.assertLogIsEmpty()
1993 |
class TestLUClusterVerifyGroupCollectDiskInfo(TestLUClusterVerifyGroupMethods):
  """Tests for collecting block device status from multiple nodes."""

  def setUp(self):
    super(TestLUClusterVerifyGroupCollectDiskInfo, self).setUp()

    self.node1 = self.cfg.AddNewNode()
    self.node2 = self.cfg.AddNewNode()
    self.node3 = self.cfg.AddNewNode()

    # One instance per disk template of interest: diskless, plain and DRBD
    self.diskless_inst = \
      self.cfg.AddNewInstance(primary_node=self.node1,
                              disk_template=constants.DT_DISKLESS)
    self.plain_inst = \
      self.cfg.AddNewInstance(primary_node=self.node2,
                              disk_template=constants.DT_PLAIN)
    self.drbd_inst = \
      self.cfg.AddNewInstance(primary_node=self.node3,
                              secondary_node=self.node2,
                              disk_template=constants.DT_DRBD8)

    def _MakeImage(node, pinst, sinst):
      # Build a NodeImage with the given primary/secondary instance lists
      img = cluster.LUClusterVerifyGroup.NodeImage(uuid=node.uuid)
      img.pinst = pinst
      img.sinst = sinst
      return img

    self.node1_img = _MakeImage(self.node1, [self.diskless_inst.uuid], [])
    self.node2_img = _MakeImage(self.node2, [self.plain_inst.uuid],
                                [self.drbd_inst.uuid])
    self.node3_img = _MakeImage(self.node3, [self.drbd_inst.uuid], [])

    self.node_images = {
      self.node1.uuid: self.node1_img,
      self.node2.uuid: self.node2_img,
      self.node3.uuid: self.node3_img,
      }

    self.node_uuids = [self.node1.uuid, self.node2.uuid, self.node3.uuid]

  @withLockedLU
  def testSuccessfulRun(self, lu):
    # node2 holds the plain disk plus the DRBD secondary, node3 the DRBD
    # primary; node1 has no disks and therefore no result entry.
    self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.node2, [(True, ""), (True, "")]) \
        .AddSuccessfulNode(self.node3, [(True, "")]) \
        .Build()

    lu._CollectDiskInfo(self.node_uuids, self.node_images,
                        self.cfg.GetAllInstancesInfo())

    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testOfflineAndFailingNodes(self, lu):
    self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
      RpcResultsBuilder() \
        .AddOfflineNode(self.node2) \
        .AddFailedNode(self.node3) \
        .Build()

    lu._CollectDiskInfo(self.node_uuids, self.node_images,
                        self.cfg.GetAllInstancesInfo())

    self.mcpu.assertLogContainsRegex("while getting disk information")

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.node2, [(True,), (False,)]) \
        .AddSuccessfulNode(self.node3, [""]) \
        .Build()

    lu._CollectDiskInfo(self.node_uuids, self.node_images,
                        self.cfg.GetAllInstancesInfo())
    # logging is not performed through mcpu
    self.mcpu.assertLogIsEmpty()
2073 |
class TestLUClusterVerifyGroupHooksCallBack(TestLUClusterVerifyGroupMethods):
  """Tests for the post-phase hooks callback of LUClusterVerifyGroup."""

  def setUp(self):
    super(TestLUClusterVerifyGroupHooksCallBack, self).setUp()

    # The callback requires a feedback function; discard all feedback
    self.feedback_fn = lambda _: None

  def PrepareLU(self, lu):
    super(TestLUClusterVerifyGroupHooksCallBack, self).PrepareLU(lu)

    lu.my_node_uuids = list(self.cfg.GetAllNodesInfo().keys())

  @withLockedLU
  def testEmptyGroup(self, lu):
    # An empty node group must be handled without touching the results
    lu.my_node_uuids = []
    lu.HooksCallBack(constants.HOOKS_PHASE_POST, None, self.feedback_fn, None)

  @withLockedLU
  def testFailedResult(self, lu):
    hook_results = RpcResultsBuilder(use_node_names=True) \
      .AddFailedNode(self.master) \
      .Build()
    lu.HooksCallBack(constants.HOOKS_PHASE_POST, hook_results,
                     self.feedback_fn, None)
    self.mcpu.assertLogContainsRegex("Communication failure in hooks execution")

  @withLockedLU
  def testOfflineNode(self, lu):
    hook_results = RpcResultsBuilder(use_node_names=True) \
      .AddOfflineNode(self.master) \
      .Build()
    lu.HooksCallBack(constants.HOOKS_PHASE_POST, hook_results,
                     self.feedback_fn, None)

  @withLockedLU
  def testValidResult(self, lu):
    hook_results = RpcResultsBuilder(use_node_names=True) \
      .AddSuccessfulNode(self.master,
                         [("mock_script", constants.HKR_SUCCESS,
                           "mock output")]) \
      .Build()
    lu.HooksCallBack(constants.HOOKS_PHASE_POST, hook_results,
                     self.feedback_fn, None)

  @withLockedLU
  def testFailedScriptResult(self, lu):
    hook_results = RpcResultsBuilder(use_node_names=True) \
      .AddSuccessfulNode(self.master,
                         [("mock_script", constants.HKR_FAIL,
                           "mock output")]) \
      .Build()
    lu.HooksCallBack(constants.HOOKS_PHASE_POST, hook_results,
                     self.feedback_fn, None)
    self.mcpu.assertLogContainsRegex("Script mock_script failed")
2132 |
class TestLUClusterVerifyDisks(CmdlibTestCase):
  """Tests for LUClusterVerifyDisks."""

  def testVerifyDisks(self):
    result = self.ExecOpCode(opcodes.OpClusterVerifyDisks())

    # The opcode must submit exactly one child job
    self.assertEqual(1, len(result["jobs"]))
2140 |
# Run all test cases in this module when executed directly.
if __name__ == "__main__":
  testutils.GanetiTestProgram()