root / test / py / cmdlib / cluster_unittest.py @ b3cc1646
History | View | Annotate | Download (76 kB)
1 |
#!/usr/bin/python
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2008, 2011, 2012, 2013 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
|
22 |
"""Tests for LUCluster*
|
23 |
|
24 |
"""
|
25 |
|
26 |
import copy
import operator
import os
import shutil
import tempfile
import unittest

from collections import defaultdict

import OpenSSL

from ganeti import constants
from ganeti import errors
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import query
from ganeti import utils
from ganeti.cmdlib import cluster
from ganeti.hypervisor import hv_xen

from testsupport import *

import testutils
50 |
|
51 |
|
52 |
class TestCertVerification(testutils.GanetiTestCase):
  """Tests for the cluster._VerifyCertificate helper."""

  def setUp(self):
    testutils.GanetiTestCase.setUp(self)

    # Scratch directory for per-test certificate files
    self.tmpdir = tempfile.mkdtemp()

  def tearDown(self):
    shutil.rmtree(self.tmpdir)
    # Fix: also run the parent class' cleanup; setUp calls the parent
    # but tearDown previously did not, leaking GanetiTestCase resources
    testutils.GanetiTestCase.tearDown(self)

  def testVerifyCertificate(self):
    # A known-good certificate must verify without raising
    cluster._VerifyCertificate(testutils.TestDataFilename("cert1.pem"))

    # A missing file must be reported as an error, not raise
    nonexist_filename = os.path.join(self.tmpdir, "does-not-exist")

    (errcode, msg) = cluster._VerifyCertificate(nonexist_filename)
    self.assertEqual(errcode, cluster.LUClusterVerifyConfig.ETYPE_ERROR)

    # Try to load non-certificate file
    invalid_cert = testutils.TestDataFilename("bdev-net.txt")
    (errcode, msg) = cluster._VerifyCertificate(invalid_cert)
    self.assertEqual(errcode, cluster.LUClusterVerifyConfig.ETYPE_ERROR)
73 |
|
74 |
|
75 |
class TestClusterVerifySsh(unittest.TestCase):
  """Tests for LUClusterVerifyGroup._SelectSshCheckNodes."""

  def testMultipleGroups(self):
    select_fn = cluster.LUClusterVerifyGroup._SelectSshCheckNodes
    # Nodes 20..25 online, node 26 offline, all in group "my"
    mygroupnodes = [
      objects.Node(name="node%d" % idx, group="my", offline=(idx == 26))
      for idx in range(20, 27)
      ]
    othernodes = [
      objects.Node(name=name, group=grp, offline=off)
      for (name, grp, off) in [
        ("node1", "g1", True),
        ("node2", "g1", False),
        ("node3", "g1", False),
        ("node4", "g1", True),
        ("node5", "g1", False),
        ("node10", "xyz", False),
        ("node11", "xyz", False),
        ("node40", "alloff", True),
        ("node41", "alloff", True),
        ("node50", "aaa", False),
        ]
      ]
    nodes = othernodes + mygroupnodes
    assert not utils.FindDuplicates(map(operator.attrgetter("name"), nodes))

    (online, perhost) = select_fn(mygroupnodes, "my", nodes)
    # Only the online group members are selected, offline node26 is not
    self.assertEqual(online, ["node%s" % i for i in range(20, 26)])
    self.assertEqual(set(perhost.keys()), set(online))

    # Each selected node checks one online node per foreign group
    self.assertEqual(perhost, {
      "node20": ["node10", "node2", "node50"],
      "node21": ["node11", "node3", "node50"],
      "node22": ["node10", "node5", "node50"],
      "node23": ["node11", "node2", "node50"],
      "node24": ["node10", "node3", "node50"],
      "node25": ["node11", "node5", "node50"],
      })

  def testSingleGroup(self):
    select_fn = cluster.LUClusterVerifyGroup._SelectSshCheckNodes
    nodes = [
      objects.Node(name="node%d" % idx, group="default", offline=off)
      for (idx, off) in [(1, True), (2, False), (3, False), (4, True)]
      ]
    assert not utils.FindDuplicates(map(operator.attrgetter("name"), nodes))

    (online, perhost) = select_fn(nodes, "default", nodes)
    self.assertEqual(online, ["node2", "node3"])
    self.assertEqual(set(perhost.keys()), set(online))

    # With no other groups there is nothing extra to check per host
    self.assertEqual(perhost, {
      "node2": [],
      "node3": [],
      })
132 |
|
133 |
|
134 |
class TestLUClusterActivateMasterIp(CmdlibTestCase):
  """Tests for LUClusterActivateMasterIp."""

  def testSuccess(self):
    op = opcodes.OpClusterActivateMasterIp()

    self.rpc.call_node_activate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master)

    self.ExecOpCode(op)

    # The RPC must be directed at the master node with its net params
    self.rpc.call_node_activate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)

  def testFailure(self):
    op = opcodes.OpClusterActivateMasterIp()

    # Fix: dropped the stray trailing line-continuation backslash that
    # followed this expression; a "\" before a blank line is fragile
    # and silently glues in whatever statement is added below it
    self.rpc.call_node_activate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)

    # A failed RPC result must surface as an OpExecError
    self.ExecOpCodeExpectOpExecError(op)
155 |
|
156 |
|
157 |
class TestLUClusterDeactivateMasterIp(CmdlibTestCase):
  """Tests for LUClusterDeactivateMasterIp."""

  def testSuccess(self):
    op = opcodes.OpClusterDeactivateMasterIp()

    self.rpc.call_node_deactivate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master)

    self.ExecOpCode(op)

    # The RPC must be directed at the master node with its net params
    self.rpc.call_node_deactivate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)

  def testFailure(self):
    op = opcodes.OpClusterDeactivateMasterIp()

    # Fix: dropped the stray trailing line-continuation backslash that
    # followed this expression; a "\" before a blank line is fragile
    # and silently glues in whatever statement is added below it
    self.rpc.call_node_deactivate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)

    # A failed RPC result must surface as an OpExecError
    self.ExecOpCodeExpectOpExecError(op)
178 |
|
179 |
|
180 |
class TestLUClusterConfigQuery(CmdlibTestCase):
  """Tests for LUClusterConfigQuery."""

  def testInvalidField(self):
    op = opcodes.OpClusterConfigQuery(output_fields=["pinky_bunny"])

    # Unknown fields must be rejected and named in the error
    self.ExecOpCodeExpectOpPrereqError(op, "pinky_bunny")

  def testAllFields(self):
    # list() so the opcode gets a real list under Python 3 as well,
    # where dict.keys() is only a view object
    op = opcodes.OpClusterConfigQuery(
      output_fields=list(query.CLUSTER_FIELDS.keys()))

    self.rpc.call_get_watcher_pause.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, -1)

    ret = self.ExecOpCode(op)

    # Querying all fields needs the watcher-pause RPC exactly once and
    # yields one value per known cluster field
    self.assertEqual(1, self.rpc.call_get_watcher_pause.call_count)
    self.assertEqual(len(ret), len(query.CLUSTER_FIELDS))

  def testEmptyFields(self):
    # Fix: method name was misspelled "testEmpytFields"
    op = opcodes.OpClusterConfigQuery(output_fields=[])

    self.ExecOpCode(op)

    # No fields requested means no RPC is necessary
    self.assertFalse(self.rpc.call_get_watcher_pause.called)
204 |
|
205 |
|
206 |
class TestLUClusterDestroy(CmdlibTestCase):
  """Tests for LUClusterDestroy."""

  def testExistingNodes(self):
    op = opcodes.OpClusterDestroy()

    self.cfg.AddNewNode()
    self.cfg.AddNewNode()

    # Fix: raw string for the regex; "\(" inside a normal string is an
    # invalid escape sequence (SyntaxWarning on modern Python)
    self.ExecOpCodeExpectOpPrereqError(op, r"still 2 node\(s\)")

  def testExistingInstances(self):
    op = opcodes.OpClusterDestroy()

    self.cfg.AddNewInstance()
    self.cfg.AddNewInstance()

    # Fix: raw string, same invalid-escape issue as above
    self.ExecOpCodeExpectOpPrereqError(op, r"still 2 instance\(s\)")

  def testEmptyCluster(self):
    op = opcodes.OpClusterDestroy()

    self.ExecOpCode(op)

    # Destroying an empty cluster runs the post-phase hook on the master
    self.assertSingleHooksCall([self.master.name],
                               "cluster-destroy",
                               constants.HOOKS_PHASE_POST)
231 |
|
232 |
|
233 |
class TestLUClusterPostInit(CmdlibTestCase):
  """Tests for LUClusterPostInit."""

  @testutils.patch_object(cluster, "_UpdateMasterClientCert")
  def testExecution(self, update_client_cert_mock):
    # mock the client certificate creation as it is tested separately
    update_client_cert_mock.return_value = None

    # For the purpose of this test, return the same certificate digest
    # for all nodes
    def fake_crypto_tokens(node_uuid, _):
      return self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(
          node_uuid,
          [(constants.CRYPTO_TYPE_SSL_DIGEST, "IA:MA:FA:KE:DI:GE:ST")])

    self.rpc.call_node_crypto_tokens = fake_crypto_tokens

    self.ExecOpCode(opcodes.OpClusterPostInit())

    # Post-init runs the post-phase hook on the master only
    self.assertSingleHooksCall([self.master.name],
                               "cluster-init",
                               constants.HOOKS_PHASE_POST)
252 |
|
253 |
|
254 |
class TestLUClusterQuery(CmdlibTestCase):
  """Tests for LUClusterQuery."""

  def testSimpleInvocation(self):
    # Querying a default cluster must simply succeed
    self.ExecOpCode(opcodes.OpClusterQuery())

  def testIPv6Cluster(self):
    # The query must also work on an IPv6-addressed cluster
    self.cluster.primary_ip_family = netutils.IP6Address.family

    self.ExecOpCode(opcodes.OpClusterQuery())
266 |
|
267 |
|
268 |
class TestLUClusterRedistConf(CmdlibTestCase):
  """Tests for LUClusterRedistConf."""

  def testSimpleInvocation(self):
    # Redistributing the configuration must simply succeed
    self.ExecOpCode(opcodes.OpClusterRedistConf())
273 |
|
274 |
|
275 |
class TestLUClusterRename(CmdlibTestCase):
  """Tests for LUClusterRename."""

  NEW_NAME = "new-name.example.com"
  NEW_IP = "203.0.113.100"

  def _MockNewHostname(self):
    # Make hostname resolution report the new cluster name and IP
    self.netutils_mod.GetHostname.return_value = \
      HostnameMock(self.NEW_NAME, self.NEW_IP)

  def testNoChanges(self):
    op = opcodes.OpClusterRename(name=self.cfg.GetClusterName())

    # Renaming to the current name must be refused
    self.ExecOpCodeExpectOpPrereqError(op, "name nor the IP address")

  def testReachableIp(self):
    op = opcodes.OpClusterRename(name=self.NEW_NAME)

    self._MockNewHostname()
    # The target IP answering a ping means it is already in use
    self.netutils_mod.TcpPing.return_value = True

    self.ExecOpCodeExpectOpPrereqError(op, "is reachable on the network")

  def testValidRename(self):
    op = opcodes.OpClusterRename(name=self.NEW_NAME)

    self._MockNewHostname()

    self.ExecOpCode(op)

    # known_hosts is rewritten and the master IP bounced exactly once
    self.assertEqual(1, self.ssh_mod.WriteKnownHostsFile.call_count)
    self.rpc.call_node_deactivate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)
    self.rpc.call_node_activate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)

  def testRenameOfflineMaster(self):
    op = opcodes.OpClusterRename(name=self.NEW_NAME)

    # The rename must still go through with an offline master
    self.master.offline = True
    self._MockNewHostname()

    self.ExecOpCode(op)
315 |
|
316 |
|
317 |
class TestLUClusterRepairDiskSizes(CmdlibTestCase):
  """Tests for LUClusterRepairDiskSizes.

  The RPC layer is mocked; call_blockdev_getdimensions results are
  injected per test to exercise the size/spindle repair logic.
  """

  def testNoInstances(self):
    # With no instances the opcode must be a no-op that succeeds
    op = opcodes.OpClusterRepairDiskSizes()

    self.ExecOpCode(op)

  def _SetUpInstanceSingleDisk(self, dev_type=constants.DT_PLAIN):
    # Create one instance with a single disk of the given template,
    # primary on the master node and secondary on a fresh node;
    # returns the (instance, disk) pair for further tweaking
    pnode = self.master
    snode = self.cfg.AddNewNode()

    disk = self.cfg.CreateDisk(dev_type=dev_type,
                               primary_node=pnode,
                               secondary_node=snode)
    inst = self.cfg.AddNewInstance(disks=[disk])

    return (inst, disk)

  def testSingleInstanceOnFailingNode(self):
    # An RPC failure must only be logged, not abort the opcode
    (inst, _) = self._SetUpInstanceSingleDisk()
    op = opcodes.OpClusterRepairDiskSizes(instances=[inst.name])

    self.rpc.call_blockdev_getdimensions.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)

    self.ExecOpCode(op)

    self.mcpu.assertLogContainsRegex("Failure in blockdev_getdimensions")

  def _ExecOpClusterRepairDiskSizes(self, node_data):
    # Run the opcode with the given per-disk getdimensions payload and
    # return its result (the list of changed disks)
    # not specifying instances repairs all
    op = opcodes.OpClusterRepairDiskSizes()

    self.rpc.call_blockdev_getdimensions.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, node_data)

    return self.ExecOpCode(op)

  def testInvalidResultData(self):
    # Malformed getdimensions payloads must be skipped with a warning
    for data in [[], [None], ["invalid"], [("still", "invalid")]]:
      self.ResetMocks()

      self._SetUpInstanceSingleDisk()
      self._ExecOpClusterRepairDiskSizes(data)

      self.mcpu.assertLogContainsRegex("ignoring")

  def testCorrectSize(self):
    # Reported size matches the configured 1 GiB -> nothing to repair
    self._SetUpInstanceSingleDisk()
    changed = self._ExecOpClusterRepairDiskSizes([(1024 * 1024 * 1024, None)])
    self.mcpu.assertLogIsEmpty()
    self.assertEqual(0, len(changed))

  def testWrongSize(self):
    # Reported 512 MiB differs from the configuration -> one repair
    self._SetUpInstanceSingleDisk()
    changed = self._ExecOpClusterRepairDiskSizes([(512 * 1024 * 1024, None)])
    self.assertEqual(1, len(changed))

  def testCorrectDRBD(self):
    # Same matching-size check for a DRBD8 disk
    self._SetUpInstanceSingleDisk(dev_type=constants.DT_DRBD8)
    changed = self._ExecOpClusterRepairDiskSizes([(1024 * 1024 * 1024, None)])
    self.mcpu.assertLogIsEmpty()
    self.assertEqual(0, len(changed))

  def testWrongDRBDChild(self):
    # A size mismatch in the DRBD child (backing) disk must be repaired
    (_, disk) = self._SetUpInstanceSingleDisk(dev_type=constants.DT_DRBD8)
    disk.children[0].size = 512
    changed = self._ExecOpClusterRepairDiskSizes([(1024 * 1024 * 1024, None)])
    self.assertEqual(1, len(changed))

  def testExclusiveStorageInvalidResultData(self):
    # With exclusive storage the payload must carry spindle counts;
    # None is invalid and must be flagged
    self._SetUpInstanceSingleDisk()
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    self._ExecOpClusterRepairDiskSizes([(1024 * 1024 * 1024, None)])

    self.mcpu.assertLogContainsRegex(
      "did not return valid spindles information")

  def testExclusiveStorageCorrectSpindles(self):
    # Configured spindles match the reported count -> no repair
    (_, disk) = self._SetUpInstanceSingleDisk()
    disk.spindles = 1
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    changed = self._ExecOpClusterRepairDiskSizes([(1024 * 1024 * 1024, 1)])
    self.assertEqual(0, len(changed))

  def testExclusiveStorageWrongSpindles(self):
    # Spindles unset in the configuration but reported as 1 -> repair
    self._SetUpInstanceSingleDisk()
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    changed = self._ExecOpClusterRepairDiskSizes([(1024 * 1024 * 1024, 1)])
    self.assertEqual(1, len(changed))
408 |
|
409 |
|
410 |
class TestLUClusterSetParams(CmdlibTestCase): |
411 |
UID_POOL = [(10, 1000)] |
412 |
|
413 |
def testUidPool(self): |
414 |
op = opcodes.OpClusterSetParams(uid_pool=self.UID_POOL)
|
415 |
self.ExecOpCode(op)
|
416 |
self.assertEqual(self.UID_POOL, self.cluster.uid_pool) |
417 |
|
418 |
def testAddUids(self): |
419 |
old_pool = [(1, 9)] |
420 |
self.cluster.uid_pool = list(old_pool) |
421 |
op = opcodes.OpClusterSetParams(add_uids=self.UID_POOL)
|
422 |
self.ExecOpCode(op)
|
423 |
self.assertEqual(set(self.UID_POOL + old_pool), |
424 |
set(self.cluster.uid_pool)) |
425 |
|
426 |
def testRemoveUids(self): |
427 |
additional_pool = [(1, 9)] |
428 |
self.cluster.uid_pool = self.UID_POOL + additional_pool |
429 |
op = opcodes.OpClusterSetParams(remove_uids=self.UID_POOL)
|
430 |
self.ExecOpCode(op)
|
431 |
self.assertEqual(additional_pool, self.cluster.uid_pool) |
432 |
|
433 |
def testMasterNetmask(self): |
434 |
op = opcodes.OpClusterSetParams(master_netmask=26)
|
435 |
self.ExecOpCode(op)
|
436 |
self.assertEqual(26, self.cluster.master_netmask) |
437 |
|
438 |
def testInvalidDiskparams(self): |
439 |
for diskparams in [{constants.DT_DISKLESS: {constants.LV_STRIPES: 0}}, |
440 |
{constants.DT_DRBD8: {constants.RBD_POOL: "pool"}},
|
441 |
{constants.DT_DRBD8: {constants.RBD_ACCESS: "bunny"}}]:
|
442 |
self.ResetMocks()
|
443 |
op = opcodes.OpClusterSetParams(diskparams=diskparams) |
444 |
self.ExecOpCodeExpectOpPrereqError(op, "verify diskparams") |
445 |
|
446 |
def testValidDiskparams(self): |
447 |
diskparams = {constants.DT_RBD: {constants.RBD_POOL: "mock_pool",
|
448 |
constants.RBD_ACCESS: "kernelspace"}}
|
449 |
op = opcodes.OpClusterSetParams(diskparams=diskparams) |
450 |
self.ExecOpCode(op)
|
451 |
self.assertEqual(diskparams[constants.DT_RBD],
|
452 |
self.cluster.diskparams[constants.DT_RBD])
|
453 |
|
454 |
def testMinimalDiskparams(self): |
455 |
diskparams = {constants.DT_RBD: {constants.RBD_POOL: "mock_pool"}}
|
456 |
self.cluster.diskparams = {}
|
457 |
op = opcodes.OpClusterSetParams(diskparams=diskparams) |
458 |
self.ExecOpCode(op)
|
459 |
self.assertEqual(diskparams, self.cluster.diskparams) |
460 |
|
461 |
def testValidDiskparamsAccess(self): |
462 |
for value in constants.DISK_VALID_ACCESS_MODES: |
463 |
self.ResetMocks()
|
464 |
op = opcodes.OpClusterSetParams(diskparams={ |
465 |
constants.DT_RBD: {constants.RBD_ACCESS: value} |
466 |
}) |
467 |
self.ExecOpCode(op)
|
468 |
got = self.cluster.diskparams[constants.DT_RBD][constants.RBD_ACCESS]
|
469 |
self.assertEqual(value, got)
|
470 |
|
471 |
def testInvalidDiskparamsAccess(self): |
472 |
for value in ["default", "pinky_bunny"]: |
473 |
self.ResetMocks()
|
474 |
op = opcodes.OpClusterSetParams(diskparams={ |
475 |
constants.DT_RBD: {constants.RBD_ACCESS: value} |
476 |
}) |
477 |
self.ExecOpCodeExpectOpPrereqError(op, "Invalid value of 'rbd:access'") |
478 |
|
479 |
def testUnsetDrbdHelperWithDrbdDisks(self): |
480 |
self.cfg.AddNewInstance(disks=[
|
481 |
self.cfg.CreateDisk(dev_type=constants.DT_DRBD8, create_nodes=True)]) |
482 |
op = opcodes.OpClusterSetParams(drbd_helper="")
|
483 |
self.ExecOpCodeExpectOpPrereqError(op, "Cannot disable drbd helper") |
484 |
|
485 |
def testFileStorageDir(self): |
486 |
op = opcodes.OpClusterSetParams(file_storage_dir="/random/path")
|
487 |
self.ExecOpCode(op)
|
488 |
|
489 |
def testSetFileStorageDirToCurrentValue(self): |
490 |
op = opcodes.OpClusterSetParams( |
491 |
file_storage_dir=self.cluster.file_storage_dir)
|
492 |
self.ExecOpCode(op)
|
493 |
|
494 |
self.mcpu.assertLogContainsRegex("file storage dir already set to value") |
495 |
|
496 |
def testUnsetFileStorageDirFileStorageEnabled(self): |
497 |
self.cfg.SetEnabledDiskTemplates([constants.DT_FILE])
|
498 |
op = opcodes.OpClusterSetParams(file_storage_dir='')
|
499 |
self.ExecOpCodeExpectOpPrereqError(op, "Unsetting the 'file' storage") |
500 |
|
501 |
def testUnsetFileStorageDirFileStorageDisabled(self): |
502 |
self.cfg.SetEnabledDiskTemplates([constants.DT_PLAIN])
|
503 |
op = opcodes.OpClusterSetParams(file_storage_dir='')
|
504 |
self.ExecOpCode(op)
|
505 |
|
506 |
def testSetFileStorageDirFileStorageDisabled(self): |
507 |
self.cfg.SetEnabledDiskTemplates([constants.DT_PLAIN])
|
508 |
op = opcodes.OpClusterSetParams(file_storage_dir='/some/path/')
|
509 |
self.ExecOpCode(op)
|
510 |
self.mcpu.assertLogContainsRegex("although file storage is not enabled") |
511 |
|
512 |
def testValidDrbdHelper(self): |
513 |
node1 = self.cfg.AddNewNode()
|
514 |
node1.offline = True
|
515 |
self.rpc.call_drbd_helper.return_value = \
|
516 |
self.RpcResultsBuilder() \
|
517 |
.AddSuccessfulNode(self.master, "/bin/true") \ |
518 |
.AddOfflineNode(node1) \ |
519 |
.Build() |
520 |
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
|
521 |
self.ExecOpCode(op)
|
522 |
self.mcpu.assertLogContainsRegex("Not checking drbd helper on offline node") |
523 |
|
524 |
def testDrbdHelperFailingNode(self): |
525 |
self.rpc.call_drbd_helper.return_value = \
|
526 |
self.RpcResultsBuilder() \
|
527 |
.AddFailedNode(self.master) \
|
528 |
.Build() |
529 |
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
|
530 |
self.ExecOpCodeExpectOpPrereqError(op, "Error checking drbd helper") |
531 |
|
532 |
def testInvalidDrbdHelper(self): |
533 |
self.rpc.call_drbd_helper.return_value = \
|
534 |
self.RpcResultsBuilder() \
|
535 |
.AddSuccessfulNode(self.master, "/bin/false") \ |
536 |
.Build() |
537 |
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
|
538 |
self.ExecOpCodeExpectOpPrereqError(op, "drbd helper is /bin/false") |
539 |
|
540 |
def testDrbdHelperWithoutDrbdDiskTemplate(self): |
541 |
drbd_helper = "/bin/random_helper"
|
542 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
543 |
self.rpc.call_drbd_helper.return_value = \
|
544 |
self.RpcResultsBuilder() \
|
545 |
.AddSuccessfulNode(self.master, drbd_helper) \
|
546 |
.Build() |
547 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
548 |
self.ExecOpCode(op)
|
549 |
|
550 |
self.mcpu.assertLogContainsRegex("but did not enable") |
551 |
|
552 |
def testResetDrbdHelperDrbdDisabled(self): |
553 |
drbd_helper = ""
|
554 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
555 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
556 |
self.ExecOpCode(op)
|
557 |
|
558 |
self.assertEqual(None, self.cluster.drbd_usermode_helper) |
559 |
|
560 |
def testResetDrbdHelperDrbdEnabled(self): |
561 |
drbd_helper = ""
|
562 |
self.cluster.enabled_disk_templates = [constants.DT_DRBD8]
|
563 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
564 |
self.ExecOpCodeExpectOpPrereqError(
|
565 |
op, "Cannot disable drbd helper while DRBD is enabled.")
|
566 |
|
567 |
def testEnableDrbdNoHelper(self): |
568 |
self.cluster.enabled_disk_templates = [constants.DT_DISKLESS]
|
569 |
self.cluster.drbd_usermode_helper = None |
570 |
enabled_disk_templates = [constants.DT_DRBD8] |
571 |
op = opcodes.OpClusterSetParams( |
572 |
enabled_disk_templates=enabled_disk_templates) |
573 |
self.ExecOpCodeExpectOpPrereqError(
|
574 |
op, "Cannot enable DRBD without a DRBD usermode helper set")
|
575 |
|
576 |
def testEnableDrbdHelperSet(self): |
577 |
drbd_helper = "/bin/random_helper"
|
578 |
self.rpc.call_drbd_helper.return_value = \
|
579 |
self.RpcResultsBuilder() \
|
580 |
.AddSuccessfulNode(self.master, drbd_helper) \
|
581 |
.Build() |
582 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
583 |
self.cluster.drbd_usermode_helper = drbd_helper
|
584 |
enabled_disk_templates = [constants.DT_DRBD8] |
585 |
op = opcodes.OpClusterSetParams( |
586 |
enabled_disk_templates=enabled_disk_templates, |
587 |
ipolicy={constants.IPOLICY_DTS: enabled_disk_templates}) |
588 |
self.ExecOpCode(op)
|
589 |
|
590 |
self.assertEqual(drbd_helper, self.cluster.drbd_usermode_helper) |
591 |
|
592 |
def testDrbdHelperAlreadySet(self): |
593 |
drbd_helper = "/bin/true"
|
594 |
self.rpc.call_drbd_helper.return_value = \
|
595 |
self.RpcResultsBuilder() \
|
596 |
.AddSuccessfulNode(self.master, "/bin/true") \ |
597 |
.Build() |
598 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
599 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
600 |
self.ExecOpCode(op)
|
601 |
|
602 |
self.assertEqual(drbd_helper, self.cluster.drbd_usermode_helper) |
603 |
self.mcpu.assertLogContainsRegex("DRBD helper already in desired state") |
604 |
|
605 |
def testSetDrbdHelper(self): |
606 |
drbd_helper = "/bin/true"
|
607 |
self.rpc.call_drbd_helper.return_value = \
|
608 |
self.RpcResultsBuilder() \
|
609 |
.AddSuccessfulNode(self.master, "/bin/true") \ |
610 |
.Build() |
611 |
self.cluster.drbd_usermode_helper = "/bin/false" |
612 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DRBD8])
|
613 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
614 |
self.ExecOpCode(op)
|
615 |
|
616 |
self.assertEqual(drbd_helper, self.cluster.drbd_usermode_helper) |
617 |
|
618 |
def testBeparams(self): |
619 |
beparams = {constants.BE_VCPUS: 32}
|
620 |
op = opcodes.OpClusterSetParams(beparams=beparams) |
621 |
self.ExecOpCode(op)
|
622 |
self.assertEqual(32, self.cluster |
623 |
.beparams[constants.PP_DEFAULT][constants.BE_VCPUS]) |
624 |
|
625 |
def testNdparams(self): |
626 |
ndparams = {constants.ND_EXCLUSIVE_STORAGE: True}
|
627 |
op = opcodes.OpClusterSetParams(ndparams=ndparams) |
628 |
self.ExecOpCode(op)
|
629 |
self.assertEqual(True, self.cluster |
630 |
.ndparams[constants.ND_EXCLUSIVE_STORAGE]) |
631 |
|
632 |
def testNdparamsResetOobProgram(self): |
633 |
ndparams = {constants.ND_OOB_PROGRAM: ""}
|
634 |
op = opcodes.OpClusterSetParams(ndparams=ndparams) |
635 |
self.ExecOpCode(op)
|
636 |
self.assertEqual(constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM],
|
637 |
self.cluster.ndparams[constants.ND_OOB_PROGRAM])
|
638 |
|
639 |
def testHvState(self): |
640 |
hv_state = {constants.HT_FAKE: {constants.HVST_CPU_TOTAL: 8}}
|
641 |
op = opcodes.OpClusterSetParams(hv_state=hv_state) |
642 |
self.ExecOpCode(op)
|
643 |
self.assertEqual(8, self.cluster.hv_state_static |
644 |
[constants.HT_FAKE][constants.HVST_CPU_TOTAL]) |
645 |
|
646 |
def testDiskState(self): |
647 |
disk_state = { |
648 |
constants.DT_PLAIN: { |
649 |
"mock_vg": {constants.DS_DISK_TOTAL: 10} |
650 |
} |
651 |
} |
652 |
op = opcodes.OpClusterSetParams(disk_state=disk_state) |
653 |
self.ExecOpCode(op)
|
654 |
self.assertEqual(10, self.cluster |
655 |
.disk_state_static[constants.DT_PLAIN]["mock_vg"]
|
656 |
[constants.DS_DISK_TOTAL]) |
657 |
|
658 |
def testDefaultIPolicy(self): |
659 |
ipolicy = constants.IPOLICY_DEFAULTS |
660 |
op = opcodes.OpClusterSetParams(ipolicy=ipolicy) |
661 |
self.ExecOpCode(op)
|
662 |
|
663 |
def testIPolicyNewViolation(self):
  """Setting an ipolicy that existing instances violate only warns."""
  # Fix: work on a deep copy. The original assigned
  # constants.IPOLICY_DEFAULTS directly (via a redundant local
  # "import ganeti.constants as C") and then mutated it in place,
  # polluting the shared constant for every other test in the process
  # (e.g. testDefaultIPolicy passes the very same object).
  ipolicy = copy.deepcopy(constants.IPOLICY_DEFAULTS)
  minmax = ipolicy[constants.ISPECS_MINMAX][0]
  minmax[constants.ISPECS_MIN][constants.ISPEC_MEM_SIZE] = 128
  minmax[constants.ISPECS_MAX][constants.ISPEC_MEM_SIZE] = 128

  # This instance needs more memory than the new policy allows
  self.cfg.AddNewInstance(beparams={constants.BE_MINMEM: 512,
                                    constants.BE_MAXMEM: 512})
  op = opcodes.OpClusterSetParams(ipolicy=ipolicy)
  self.ExecOpCode(op)

  self.mcpu.assertLogContainsRegex("instances violate them")
674 |
|
675 |
def testNicparamsNoInstance(self): |
676 |
nicparams = { |
677 |
constants.NIC_LINK: "mock_bridge"
|
678 |
} |
679 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
680 |
self.ExecOpCode(op)
|
681 |
|
682 |
self.assertEqual("mock_bridge", |
683 |
self.cluster.nicparams
|
684 |
[constants.PP_DEFAULT][constants.NIC_LINK]) |
685 |
|
686 |
def testNicparamsInvalidConf(self): |
687 |
nicparams = { |
688 |
constants.NIC_MODE: constants.NIC_MODE_BRIDGED, |
689 |
constants.NIC_LINK: ""
|
690 |
} |
691 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
692 |
self.ExecOpCodeExpectException(op, errors.ConfigurationError, "NIC link") |
693 |
|
694 |
def testNicparamsInvalidInstanceConf(self): |
695 |
nicparams = { |
696 |
constants.NIC_MODE: constants.NIC_MODE_BRIDGED, |
697 |
constants.NIC_LINK: "mock_bridge"
|
698 |
} |
699 |
self.cfg.AddNewInstance(nics=[
|
700 |
self.cfg.CreateNic(nicparams={constants.NIC_LINK: None})]) |
701 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
702 |
self.ExecOpCodeExpectOpPrereqError(op, "Missing bridged NIC link") |
703 |
|
704 |
def testNicparamsMissingIp(self): |
705 |
nicparams = { |
706 |
constants.NIC_MODE: constants.NIC_MODE_ROUTED |
707 |
} |
708 |
self.cfg.AddNewInstance()
|
709 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
710 |
self.ExecOpCodeExpectOpPrereqError(op, "routed NIC with no ip address") |
711 |
|
712 |
def testNicparamsWithInstance(self): |
713 |
nicparams = { |
714 |
constants.NIC_LINK: "mock_bridge"
|
715 |
} |
716 |
self.cfg.AddNewInstance()
|
717 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
718 |
self.ExecOpCode(op)
|
719 |
|
720 |
def testDefaultHvparams(self): |
721 |
hvparams = constants.HVC_DEFAULTS |
722 |
op = opcodes.OpClusterSetParams(hvparams=hvparams) |
723 |
self.ExecOpCode(op)
|
724 |
|
725 |
self.assertEqual(hvparams, self.cluster.hvparams) |
726 |
|
727 |
def testMinimalHvparams(self): |
728 |
hvparams = { |
729 |
constants.HT_FAKE: { |
730 |
constants.HV_MIGRATION_MODE: constants.HT_MIGRATION_NONLIVE |
731 |
} |
732 |
} |
733 |
self.cluster.hvparams = {}
|
734 |
op = opcodes.OpClusterSetParams(hvparams=hvparams) |
735 |
self.ExecOpCode(op)
|
736 |
|
737 |
self.assertEqual(hvparams, self.cluster.hvparams) |
738 |
|
739 |
def testOsHvp(self): |
740 |
os_hvp = { |
741 |
"mocked_os": {
|
742 |
constants.HT_FAKE: { |
743 |
constants.HV_MIGRATION_MODE: constants.HT_MIGRATION_NONLIVE |
744 |
} |
745 |
}, |
746 |
"other_os": constants.HVC_DEFAULTS
|
747 |
} |
748 |
op = opcodes.OpClusterSetParams(os_hvp=os_hvp) |
749 |
self.ExecOpCode(op)
|
750 |
|
751 |
self.assertEqual(constants.HT_MIGRATION_NONLIVE,
|
752 |
self.cluster.os_hvp["mocked_os"][constants.HT_FAKE] |
753 |
[constants.HV_MIGRATION_MODE]) |
754 |
self.assertEqual(constants.HVC_DEFAULTS, self.cluster.os_hvp["other_os"]) |
755 |
|
756 |
def testRemoveOsHvp(self): |
757 |
os_hvp = {"mocked_os": {constants.HT_FAKE: None}} |
758 |
op = opcodes.OpClusterSetParams(os_hvp=os_hvp) |
759 |
self.ExecOpCode(op)
|
760 |
|
761 |
assert constants.HT_FAKE not in self.cluster.os_hvp["mocked_os"] |
762 |
|
763 |
def testDefaultOsHvp(self): |
764 |
os_hvp = {"mocked_os": constants.HVC_DEFAULTS.copy()}
|
765 |
self.cluster.os_hvp = {"mocked_os": {}} |
766 |
op = opcodes.OpClusterSetParams(os_hvp=os_hvp) |
767 |
self.ExecOpCode(op)
|
768 |
|
769 |
self.assertEqual(os_hvp, self.cluster.os_hvp) |
770 |
|
771 |
def testOsparams(self): |
772 |
osparams = { |
773 |
"mocked_os": {
|
774 |
"param1": "value1", |
775 |
"param2": None |
776 |
}, |
777 |
"other_os": {
|
778 |
"param1": None |
779 |
} |
780 |
} |
781 |
self.cluster.osparams = {"other_os": {"param1": "value1"}} |
782 |
op = opcodes.OpClusterSetParams(osparams=osparams) |
783 |
self.ExecOpCode(op)
|
784 |
|
785 |
self.assertEqual({"mocked_os": {"param1": "value1"}}, self.cluster.osparams) |
786 |
|
787 |
def testEnabledHypervisors(self): |
788 |
enabled_hypervisors = [constants.HT_XEN_HVM, constants.HT_XEN_PVM] |
789 |
op = opcodes.OpClusterSetParams(enabled_hypervisors=enabled_hypervisors) |
790 |
self.ExecOpCode(op)
|
791 |
|
792 |
self.assertEqual(enabled_hypervisors, self.cluster.enabled_hypervisors) |
793 |
|
794 |
def testEnabledHypervisorsWithoutHypervisorParams(self): |
795 |
enabled_hypervisors = [constants.HT_FAKE] |
796 |
self.cluster.hvparams = {}
|
797 |
op = opcodes.OpClusterSetParams(enabled_hypervisors=enabled_hypervisors) |
798 |
self.ExecOpCode(op)
|
799 |
|
800 |
self.assertEqual(enabled_hypervisors, self.cluster.enabled_hypervisors) |
801 |
self.assertEqual(constants.HVC_DEFAULTS[constants.HT_FAKE],
|
802 |
self.cluster.hvparams[constants.HT_FAKE])
|
803 |
|
804 |
@testutils.patch_object(utils, "FindFile") |
805 |
def testValidDefaultIallocator(self, find_file_mock): |
806 |
find_file_mock.return_value = "/random/path"
|
807 |
default_iallocator = "/random/path"
|
808 |
op = opcodes.OpClusterSetParams(default_iallocator=default_iallocator) |
809 |
self.ExecOpCode(op)
|
810 |
|
811 |
self.assertEqual(default_iallocator, self.cluster.default_iallocator) |
812 |
|
813 |
@testutils.patch_object(utils, "FindFile") |
814 |
def testInvalidDefaultIallocator(self, find_file_mock): |
815 |
find_file_mock.return_value = None
|
816 |
default_iallocator = "/random/path"
|
817 |
op = opcodes.OpClusterSetParams(default_iallocator=default_iallocator) |
818 |
self.ExecOpCodeExpectOpPrereqError(op, "Invalid default iallocator script") |
819 |
|
820 |
def testEnabledDiskTemplates(self): |
821 |
enabled_disk_templates = [constants.DT_DISKLESS, constants.DT_PLAIN] |
822 |
op = opcodes.OpClusterSetParams( |
823 |
enabled_disk_templates=enabled_disk_templates, |
824 |
ipolicy={constants.IPOLICY_DTS: enabled_disk_templates}) |
825 |
self.ExecOpCode(op)
|
826 |
|
827 |
self.assertEqual(enabled_disk_templates,
|
828 |
self.cluster.enabled_disk_templates)
|
829 |
|
830 |
def testEnabledDiskTemplatesVsIpolicy(self): |
831 |
enabled_disk_templates = [constants.DT_DISKLESS, constants.DT_PLAIN] |
832 |
op = opcodes.OpClusterSetParams( |
833 |
enabled_disk_templates=enabled_disk_templates, |
834 |
ipolicy={constants.IPOLICY_DTS: [constants.DT_FILE]}) |
835 |
self.ExecOpCodeExpectOpPrereqError(op, "but not enabled on the cluster") |
836 |
|
837 |
def testDisablingDiskTemplatesOfInstances(self): |
838 |
old_disk_templates = [constants.DT_DISKLESS, constants.DT_PLAIN] |
839 |
self.cfg.SetEnabledDiskTemplates(old_disk_templates)
|
840 |
self.cfg.AddNewInstance(
|
841 |
disks=[self.cfg.CreateDisk(dev_type=constants.DT_PLAIN)])
|
842 |
new_disk_templates = [constants.DT_DISKLESS, constants.DT_DRBD8] |
843 |
op = opcodes.OpClusterSetParams( |
844 |
enabled_disk_templates=new_disk_templates, |
845 |
ipolicy={constants.IPOLICY_DTS: new_disk_templates}) |
846 |
self.ExecOpCodeExpectOpPrereqError(op, "least one instance using it") |
847 |
|
848 |
def testEnabledDiskTemplatesWithoutVgName(self): |
849 |
enabled_disk_templates = [constants.DT_PLAIN] |
850 |
self.cluster.volume_group_name = None |
851 |
op = opcodes.OpClusterSetParams( |
852 |
enabled_disk_templates=enabled_disk_templates) |
853 |
self.ExecOpCodeExpectOpPrereqError(op, "specify a volume group") |
854 |
|
855 |
def testDisableDiskTemplateWithExistingInstance(self): |
856 |
enabled_disk_templates = [constants.DT_DISKLESS] |
857 |
self.cfg.AddNewInstance(
|
858 |
disks=[self.cfg.CreateDisk(dev_type=constants.DT_PLAIN)])
|
859 |
op = opcodes.OpClusterSetParams( |
860 |
enabled_disk_templates=enabled_disk_templates, |
861 |
ipolicy={constants.IPOLICY_DTS: enabled_disk_templates}) |
862 |
self.ExecOpCodeExpectOpPrereqError(op, "Cannot disable disk template") |
863 |
|
864 |
def testVgNameNoLvmDiskTemplateEnabled(self): |
865 |
vg_name = "test_vg"
|
866 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
867 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
868 |
self.ExecOpCode(op)
|
869 |
|
870 |
self.assertEqual(vg_name, self.cluster.volume_group_name) |
871 |
self.mcpu.assertLogIsEmpty()
|
872 |
|
873 |
def testUnsetVgNameWithLvmDiskTemplateEnabled(self): |
874 |
vg_name = ""
|
875 |
self.cluster.enabled_disk_templates = [constants.DT_PLAIN]
|
876 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
877 |
self.ExecOpCodeExpectOpPrereqError(op, "Cannot unset volume group") |
878 |
|
879 |
def testUnsetVgNameWithLvmInstance(self): |
880 |
vg_name = ""
|
881 |
self.cfg.AddNewInstance(
|
882 |
disks=[self.cfg.CreateDisk(dev_type=constants.DT_PLAIN)])
|
883 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
884 |
self.ExecOpCodeExpectOpPrereqError(op, "Cannot unset volume group") |
885 |
|
886 |
def testUnsetVgNameWithNoLvmDiskTemplateEnabled(self): |
887 |
vg_name = ""
|
888 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
889 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
890 |
self.ExecOpCode(op)
|
891 |
|
892 |
self.assertEqual(None, self.cluster.volume_group_name) |
893 |
|
894 |
def testVgNameToOldName(self): |
895 |
vg_name = self.cluster.volume_group_name
|
896 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
897 |
self.ExecOpCode(op)
|
898 |
|
899 |
self.mcpu.assertLogContainsRegex("already in desired state") |
900 |
|
901 |
def testVgNameWithFailingNode(self): |
902 |
vg_name = "test_vg"
|
903 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
904 |
self.rpc.call_vg_list.return_value = \
|
905 |
self.RpcResultsBuilder() \
|
906 |
.AddFailedNode(self.master) \
|
907 |
.Build() |
908 |
self.ExecOpCode(op)
|
909 |
|
910 |
self.mcpu.assertLogContainsRegex("Error while gathering data on node") |
911 |
|
912 |
def testVgNameWithValidNode(self): |
913 |
vg_name = "test_vg"
|
914 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
915 |
self.rpc.call_vg_list.return_value = \
|
916 |
self.RpcResultsBuilder() \
|
917 |
.AddSuccessfulNode(self.master, {vg_name: 1024 * 1024}) \ |
918 |
.Build() |
919 |
self.ExecOpCode(op)
|
920 |
|
921 |
def testVgNameWithTooSmallNode(self): |
922 |
vg_name = "test_vg"
|
923 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
924 |
self.rpc.call_vg_list.return_value = \
|
925 |
self.RpcResultsBuilder() \
|
926 |
.AddSuccessfulNode(self.master, {vg_name: 1}) \ |
927 |
.Build() |
928 |
self.ExecOpCodeExpectOpPrereqError(op, "too small") |
929 |
|
930 |
def testMiscParameters(self): |
931 |
op = opcodes.OpClusterSetParams(candidate_pool_size=123,
|
932 |
maintain_node_health=True,
|
933 |
modify_etc_hosts=True,
|
934 |
prealloc_wipe_disks=True,
|
935 |
reserved_lvs=["/dev/mock_lv"],
|
936 |
use_external_mip_script=True)
|
937 |
self.ExecOpCode(op)
|
938 |
|
939 |
self.mcpu.assertLogIsEmpty()
|
940 |
self.assertEqual(123, self.cluster.candidate_pool_size) |
941 |
self.assertEqual(True, self.cluster.maintain_node_health) |
942 |
self.assertEqual(True, self.cluster.modify_etc_hosts) |
943 |
self.assertEqual(True, self.cluster.prealloc_wipe_disks) |
944 |
self.assertEqual(["/dev/mock_lv"], self.cluster.reserved_lvs) |
945 |
self.assertEqual(True, self.cluster.use_external_mip_script) |
946 |
|
947 |
def testAddHiddenOs(self): |
948 |
self.cluster.hidden_os = ["hidden1", "hidden2"] |
949 |
op = opcodes.OpClusterSetParams(hidden_os=[(constants.DDM_ADD, "hidden2"),
|
950 |
(constants.DDM_ADD, "hidden3")])
|
951 |
self.ExecOpCode(op)
|
952 |
|
953 |
self.assertEqual(["hidden1", "hidden2", "hidden3"], self.cluster.hidden_os) |
954 |
self.mcpu.assertLogContainsRegex("OS hidden2 already") |
955 |
|
956 |
def testRemoveBlacklistedOs(self): |
957 |
self.cluster.blacklisted_os = ["blisted1", "blisted2"] |
958 |
op = opcodes.OpClusterSetParams(blacklisted_os=[ |
959 |
(constants.DDM_REMOVE, "blisted2"),
|
960 |
(constants.DDM_REMOVE, "blisted3")])
|
961 |
self.ExecOpCode(op)
|
962 |
|
963 |
self.assertEqual(["blisted1"], self.cluster.blacklisted_os) |
964 |
self.mcpu.assertLogContainsRegex("OS blisted3 not found") |
965 |
|
966 |
def testMasterNetdev(self): |
967 |
master_netdev = "test_dev"
|
968 |
op = opcodes.OpClusterSetParams(master_netdev=master_netdev) |
969 |
self.ExecOpCode(op)
|
970 |
|
971 |
self.assertEqual(master_netdev, self.cluster.master_netdev) |
972 |
|
973 |
def testMasterNetdevFailNoForce(self): |
974 |
master_netdev = "test_dev"
|
975 |
op = opcodes.OpClusterSetParams(master_netdev=master_netdev) |
976 |
self.rpc.call_node_deactivate_master_ip.return_value = \
|
977 |
self.RpcResultsBuilder() \
|
978 |
.CreateFailedNodeResult(self.master)
|
979 |
self.ExecOpCodeExpectOpExecError(op, "Could not disable the master ip") |
980 |
|
981 |
def testMasterNetdevFailForce(self): |
982 |
master_netdev = "test_dev"
|
983 |
op = opcodes.OpClusterSetParams(master_netdev=master_netdev, |
984 |
force=True)
|
985 |
self.rpc.call_node_deactivate_master_ip.return_value = \
|
986 |
self.RpcResultsBuilder() \
|
987 |
.CreateFailedNodeResult(self.master)
|
988 |
self.ExecOpCode(op)
|
989 |
|
990 |
self.mcpu.assertLogContainsRegex("Could not disable the master ip") |
991 |
|
992 |
|
993 |
class TestLUClusterVerify(CmdlibTestCase):
  """Tests for the job-submitting OpClusterVerify wrapper LU."""

  def testVerifyAllGroups(self):
    """Without a group filter one verification job per group is created."""
    result = self.ExecOpCode(opcodes.OpClusterVerify())

    self.assertEqual(2, len(result["jobs"]))

  def testVerifyDefaultGroups(self):
    """With an explicit group only a single job is created."""
    result = self.ExecOpCode(opcodes.OpClusterVerify(group_name="default"))

    self.assertEqual(1, len(result["jobs"]))
1007 |
class TestLUClusterVerifyConfig(CmdlibTestCase):
  """Tests for LUClusterVerifyConfig.

  Certificate loading/verification and file access are mocked out so the
  LU's config checks can run without real files on disk.

  """

  def setUp(self):
    super(TestLUClusterVerifyConfig, self).setUp()

    # Patch certificate and file helpers; the patchers are stopped in
    # tearDown in reverse order of creation.
    self._load_cert_patcher = testutils \
      .patch_object(OpenSSL.crypto, "load_certificate")
    self._load_cert_mock = self._load_cert_patcher.start()
    self._verify_cert_patcher = testutils \
      .patch_object(utils, "VerifyX509Certificate")
    self._verify_cert_mock = self._verify_cert_patcher.start()
    self._read_file_patcher = testutils.patch_object(utils, "ReadFile")
    self._read_file_mock = self._read_file_patcher.start()
    self._can_read_patcher = testutils.patch_object(utils, "CanRead")
    self._can_read_mock = self._can_read_patcher.start()

    # Default to "everything is readable and every certificate is valid"
    self._can_read_mock.return_value = True
    self._read_file_mock.return_value = True
    self._verify_cert_mock.return_value = (None, "")
    self._load_cert_mock.return_value = True

  def tearDown(self):
    super(TestLUClusterVerifyConfig, self).tearDown()

    self._can_read_patcher.stop()
    self._read_file_patcher.stop()
    self._verify_cert_patcher.stop()
    self._load_cert_patcher.stop()

  def testSuccessfulRun(self):
    # A consistent configuration must verify successfully
    self.cfg.AddNewInstance()
    op = opcodes.OpClusterVerifyConfig()
    result = self.ExecOpCode(op)

    self.assertTrue(result)

  def testDanglingNode(self):
    # A node pointing at a non-existing group must be reported
    node = self.cfg.AddNewNode()
    self.cfg.AddNewInstance(primary_node=node)
    node.group = "invalid"
    op = opcodes.OpClusterVerifyConfig()
    result = self.ExecOpCode(op)

    self.mcpu.assertLogContainsRegex(
      "following nodes \(and their instances\) belong to a non existing group")
    self.assertFalse(result)

  def testDanglingInstance(self):
    # An instance pointing at a non-existing primary node must be reported
    inst = self.cfg.AddNewInstance()
    inst.primary_node = "invalid"
    op = opcodes.OpClusterVerifyConfig()
    result = self.ExecOpCode(op)

    self.mcpu.assertLogContainsRegex(
      "following instances have a non-existing primary-node")
    self.assertFalse(result)
|
1065 |
class TestLUClusterVerifyGroup(CmdlibTestCase):
  """End-to-end invocations of LUClusterVerifyGroup via ExecOpCode."""

  def testEmptyNodeGroup(self):
    # A group without nodes is skipped, but verification still succeeds
    group = self.cfg.AddNewNodeGroup()
    op = opcodes.OpClusterVerifyGroup(group_name=group.name, verbose=True)

    result = self.ExecOpCode(op)

    self.assertTrue(result)
    self.mcpu.assertLogContainsRegex("Empty node group, skipping verification")

  def testSimpleInvocation(self):
    # Smoke test: verifying the default group must not raise
    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)

    self.ExecOpCode(op)

  def testSimpleInvocationWithInstance(self):
    # Same smoke test with a diskless instance present
    self.cfg.AddNewInstance(disks=[])
    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)

    self.ExecOpCode(op)

  def testGhostNode(self):
    # A DRBD secondary in another group while both nodes are offline;
    # the mirror-status RPC reports the master node as offline
    group = self.cfg.AddNewNodeGroup()
    node = self.cfg.AddNewNode(group=group.uuid, offline=True)
    self.master.offline = True
    self.cfg.AddNewInstance(disk_template=constants.DT_DRBD8,
                            primary_node=self.master,
                            secondary_node=node)

    self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
      RpcResultsBuilder() \
        .AddOfflineNode(self.master) \
        .Build()

    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)

    self.ExecOpCode(op)

  def testValidRpcResult(self):
    # A successful (empty) node_verify RPC result must be accepted
    self.cfg.AddNewInstance(disks=[])

    self.rpc.call_node_verify.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.master, {}) \
        .Build()

    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)

    self.ExecOpCode(op)
1116 |
class TestLUClusterVerifyGroupMethods(CmdlibTestCase):
  """Base class for testing individual methods in LUClusterVerifyGroup.

  """
  def setUp(self):
    super(TestLUClusterVerifyGroupMethods, self).setUp()
    # All subclasses exercise the LU built from this opcode
    self.op = opcodes.OpClusterVerifyGroup(group_name="default")

  def PrepareLU(self, lu):
    # Hook called by the test support code before each locked-LU test:
    # preset the attributes the individual _Verify* methods rely on
    lu._exclusive_storage = False
    lu.master_node = self.master_uuid
    lu.group_info = self.group
    # NOTE: patches the *class* attribute so all_node_info always reflects
    # the mocked config; this leaks across instances by design of the mock
    cluster.LUClusterVerifyGroup.all_node_info = \
      property(fget=lambda _: self.cfg.GetAllNodesInfo())
1132 |
class TestLUClusterVerifyGroupVerifyNode(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNode."""

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    # Non-dict node results are rejected outright
    self.assertFalse(lu._VerifyNode(self.master, None))
    self.assertFalse(lu._VerifyNode(self.master, ""))

  @withLockedLU
  def testInvalidVersion(self, lu):
    # Missing/empty version or a protocol mismatch fails verification
    self.assertFalse(lu._VerifyNode(self.master, {"version": None}))
    self.assertFalse(lu._VerifyNode(self.master, {"version": ""}))
    self.assertFalse(lu._VerifyNode(self.master, {
      "version": (constants.PROTOCOL_VERSION - 1, constants.RELEASE_VERSION)
    }))

    self.mcpu.ClearLogMessages()
    # A release-version mismatch alone only warns; the node still passes
    self.assertTrue(lu._VerifyNode(self.master, {
      "version": (constants.PROTOCOL_VERSION, constants.RELEASE_VERSION + "x")
    }))
    self.mcpu.assertLogContainsRegex("software version mismatch")

  def _GetValidNodeResult(self, additional_fields):
    # Minimal node result that passes _VerifyNode, extended per test
    ret = {
      "version": (constants.PROTOCOL_VERSION, constants.RELEASE_VERSION),
      constants.NV_NODESETUP: []
    }
    ret.update(additional_fields)
    return ret

  @withLockedLU
  def testHypervisor(self, lu):
    # A hypervisor reporting an error string must be logged by name
    lu._VerifyNode(self.master, self._GetValidNodeResult({
      constants.NV_HYPERVISOR: {
        constants.HT_XEN_PVM: None,
        constants.HT_XEN_HVM: "mock error"
      }
    }))
    self.mcpu.assertLogContainsRegex(constants.HT_XEN_HVM)
    self.mcpu.assertLogContainsRegex("mock error")

  @withLockedLU
  def testHvParams(self, lu):
    # Failing hvparams validation entries must be logged with context
    lu._VerifyNode(self.master, self._GetValidNodeResult({
      constants.NV_HVPARAMS: [("mock item", constants.HT_XEN_HVM, "mock error")]
    }))
    self.mcpu.assertLogContainsRegex(constants.HT_XEN_HVM)
    self.mcpu.assertLogContainsRegex("mock item")
    self.mcpu.assertLogContainsRegex("mock error")

  @withLockedLU
  def testSuccessfulResult(self, lu):
    # A fully valid node result passes without log output
    self.assertTrue(lu._VerifyNode(self.master, self._GetValidNodeResult({})))
    self.mcpu.assertLogIsEmpty()
1186 |
class TestLUClusterVerifyGroupVerifyNodeTime(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeTime."""

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    """Missing or malformed NV_TIME entries must be reported."""
    invalid_results = [{}, {constants.NV_TIME: "invalid"}]
    for node_data in invalid_results:
      self.mcpu.ClearLogMessages()
      lu._VerifyNodeTime(self.master, node_data, None, None)

      self.mcpu.assertLogContainsRegex("Node returned invalid time")

  @withLockedLU
  def testNodeDiverges(self, lu):
    """Node clocks outside the [1000, 1005] window must be flagged."""
    for reported_time in ((0, 0), (2000, 0)):
      self.mcpu.ClearLogMessages()
      lu._VerifyNodeTime(self.master, {constants.NV_TIME: reported_time},
                         1000, 1005)

      self.mcpu.assertLogContainsRegex("Node time diverges")

  @withLockedLU
  def testSuccessfulResult(self, lu):
    """A node time inside the allowed window produces no messages."""
    lu._VerifyNodeTime(self.master, {constants.NV_TIME: (0, 0)}, 0, 5)
    self.mcpu.assertLogIsEmpty()
1209 |
class TestLUClusterVerifyGroupUpdateVerifyNodeLVM(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._UpdateVerifyNodeLVM."""

  def setUp(self):
    super(TestLUClusterVerifyGroupUpdateVerifyNodeLVM, self).setUp()
    # Canonical well-formed LVM node result: one VG with one PV
    self.VALID_NRESULT = {
      constants.NV_VGLIST: {"mock_vg": 30000},
      constants.NV_PVLIST: [
        {
          "name": "mock_pv",
          "vg_name": "mock_vg",
          "size": 5000,
          "free": 2500,
          "attributes": [],
          "lv_list": []
        }
      ]
    }

  @withLockedLU
  def testNoVgName(self, lu):
    # Without a configured VG there is nothing to check
    lu._UpdateVerifyNodeLVM(self.master, {}, None, None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testEmptyNodeResult(self, lu):
    # A node returning no LVM data must produce both error messages
    lu._UpdateVerifyNodeLVM(self.master, {}, "mock_vg", None)
    self.mcpu.assertLogContainsRegex("unable to check volume groups")
    self.mcpu.assertLogContainsRegex("Can't get PV list from node")

  @withLockedLU
  def testValidNodeResult(self, lu):
    lu._UpdateVerifyNodeLVM(self.master, self.VALID_NRESULT, "mock_vg", None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testValidNodeResultExclusiveStorage(self, lu):
    # Exclusive storage additionally fills in the node image's PV stats
    lu._exclusive_storage = True
    lu._UpdateVerifyNodeLVM(self.master, self.VALID_NRESULT, "mock_vg",
                            cluster.LUClusterVerifyGroup.NodeImage())
    self.mcpu.assertLogIsEmpty()
1251 |
class TestLUClusterVerifyGroupVerifyGroupDRBDVersion(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyGroupDRBDVersion."""

  @withLockedLU
  def testEmptyNodeResult(self, lu):
    # No node data at all: nothing to compare, nothing to report
    lu._VerifyGroupDRBDVersion({})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testValidNodeResult(self, lu):
    # A single node trivially agrees with itself
    lu._VerifyGroupDRBDVersion(
      RpcResultsBuilder()
        .AddSuccessfulNode(self.master, {
          constants.NV_DRBDVERSION: "8.3.0"
        })
        .Build())
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testDifferentVersions(self, lu):
    # Two nodes with different DRBD versions: both versions are reported
    node1 = self.cfg.AddNewNode()
    lu._VerifyGroupDRBDVersion(
      RpcResultsBuilder()
        .AddSuccessfulNode(self.master, {
          constants.NV_DRBDVERSION: "8.3.0"
        })
        .AddSuccessfulNode(node1, {
          constants.NV_DRBDVERSION: "8.4.0"
        })
        .Build())
    self.mcpu.assertLogContainsRegex("DRBD version mismatch: 8.3.0")
    self.mcpu.assertLogContainsRegex("DRBD version mismatch: 8.4.0")
1284 |
class TestLUClusterVerifyGroupVerifyGroupLVM(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyGroupLVM."""

  @withLockedLU
  def testNoVgName(self, lu):
    # Without a VG name the group-level LVM check is a no-op
    lu._VerifyGroupLVM(None, None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testNoExclusiveStorage(self, lu):
    # Without exclusive storage there are no PV-size constraints to verify
    lu._VerifyGroupLVM(None, "mock_vg")
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testNoPvInfo(self, lu):
    # A node image without PV statistics is silently skipped
    lu._exclusive_storage = True
    nimg = cluster.LUClusterVerifyGroup.NodeImage()
    lu._VerifyGroupLVM({self.master.uuid: nimg}, "mock_vg")
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testValidPvInfos(self, lu):
    # Overlapping pv_min/pv_max ranges across nodes are acceptable
    lu._exclusive_storage = True
    node2 = self.cfg.AddNewNode()
    nimg1 = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master.uuid)
    nimg1.pv_min = 10000
    nimg1.pv_max = 10010
    nimg2 = cluster.LUClusterVerifyGroup.NodeImage(uuid=node2.uuid)
    nimg2.pv_min = 9998
    nimg2.pv_max = 10005
    lu._VerifyGroupLVM({self.master.uuid: nimg1, node2.uuid: nimg2}, "mock_vg")
    self.mcpu.assertLogIsEmpty()
1316 |
class TestLUClusterVerifyGroupVerifyNodeBridges(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeBridges."""

  @withLockedLU
  def testNoBridges(self, lu):
    """With no bridges requested the check is a no-op."""
    lu._VerifyNodeBridges(None, None, None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testInvalidBridges(self, lu):
    """Missing or malformed NV_BRIDGES data and missing bridges warn."""
    for node_data in ({}, {constants.NV_BRIDGES: ""}):
      self.mcpu.ClearLogMessages()
      lu._VerifyNodeBridges(self.master, node_data, ["mock_bridge"])
      self.mcpu.assertLogContainsRegex("not return valid bridge information")

    self.mcpu.ClearLogMessages()
    lu._VerifyNodeBridges(self.master, {constants.NV_BRIDGES: ["mock_bridge"]},
                          ["mock_bridge"])
    self.mcpu.assertLogContainsRegex("missing bridge")
1336 |
class TestLUClusterVerifyGroupVerifyNodeUserScripts(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeUserScripts."""

  @withLockedLU
  def testNoUserScripts(self, lu):
    """A node result without NV_USERSCRIPTS must be flagged."""
    lu._VerifyNodeUserScripts(self.master, {})
    self.mcpu.assertLogContainsRegex("did not return user scripts information")

  @withLockedLU
  def testBrokenUserScripts(self, lu):
    """Scripts reported back by the node are the broken ones."""
    node_data = {constants.NV_USERSCRIPTS: ["script"]}
    lu._VerifyNodeUserScripts(self.master, node_data)
    self.mcpu.assertLogContainsRegex("scripts not present or not executable")
1350 |
class TestLUClusterVerifyGroupVerifyNodeNetwork(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeNetwork."""

  def setUp(self):
    super(TestLUClusterVerifyGroupVerifyNodeNetwork, self).setUp()
    # A node result with all network checks passing; individual tests
    # mutate this dict to inject one failure at a time
    self.VALID_NRESULT = {
      constants.NV_NODELIST: {},
      constants.NV_NODENETTEST: {},
      constants.NV_MASTERIP: True
    }

  @withLockedLU
  def testEmptyNodeResult(self, lu):
    # A completely empty result triggers all three "no data" messages
    lu._VerifyNodeNetwork(self.master, {})
    self.mcpu.assertLogContainsRegex(
      "node hasn't returned node ssh connectivity data")
    self.mcpu.assertLogContainsRegex(
      "node hasn't returned node tcp connectivity data")
    self.mcpu.assertLogContainsRegex(
      "node hasn't returned node master IP reachability data")

  @withLockedLU
  def testValidResult(self, lu):
    lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testSshProblem(self, lu):
    # A non-empty NV_NODELIST maps failing peers to error strings
    self.VALID_NRESULT.update({
      constants.NV_NODELIST: {
        "mock_node": "mock_error"
      }
    })
    lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
    self.mcpu.assertLogContainsRegex("ssh communication with node 'mock_node'")

  @withLockedLU
  def testTcpProblem(self, lu):
    # Same shape as the SSH check, but for the TCP connectivity test
    self.VALID_NRESULT.update({
      constants.NV_NODENETTEST: {
        "mock_node": "mock_error"
      }
    })
    lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
    self.mcpu.assertLogContainsRegex("tcp communication with node 'mock_node'")

  @withLockedLU
  def testMasterIpNotReachable(self, lu):
    # The message differs depending on whether the failing node is the
    # master itself or a regular node
    self.VALID_NRESULT.update({
      constants.NV_MASTERIP: False
    })
    node1 = self.cfg.AddNewNode()
    lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
    self.mcpu.assertLogContainsRegex(
      "the master node cannot reach the master IP")

    self.mcpu.ClearLogMessages()
    lu._VerifyNodeNetwork(node1, self.VALID_NRESULT)
    self.mcpu.assertLogContainsRegex("cannot reach the master IP")
1411 |
class TestLUClusterVerifyGroupVerifyInstance(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyInstance.

  setUp builds three instances (DRBD, running plain, diskless) plus node
  images whose volume lists match the instances' disks, so each test only
  has to break one aspect of that consistent picture.

  """
  def setUp(self):
    super(TestLUClusterVerifyGroupVerifyInstance, self).setUp()

    self.node1 = self.cfg.AddNewNode()
    # DRBD instance mirrored between master (primary) and node1 (secondary)
    self.drbd_inst = self.cfg.AddNewInstance(
      disks=[self.cfg.CreateDisk(dev_type=constants.DT_DRBD8,
                                 primary_node=self.master,
                                 secondary_node=self.node1)])
    self.running_inst = self.cfg.AddNewInstance(
      admin_state=constants.ADMINST_UP, disks_active=True)
    self.diskless_inst = self.cfg.AddNewInstance(disks=[])

    # Master node image: volumes of the plain/diskless instances plus the
    # local DRBD children, and the running instance listed as present
    self.master_img = \
      cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    self.master_img.volumes = ["/".join(disk.logical_id)
                               for inst in [self.running_inst,
                                            self.diskless_inst]
                               for disk in inst.disks]
    self.master_img.volumes.extend(
      ["/".join(disk.logical_id) for disk in self.drbd_inst.disks[0].children])
    self.master_img.instances = [self.running_inst.uuid]
    # Secondary node image: only the DRBD children live there
    self.node1_img = \
      cluster.LUClusterVerifyGroup.NodeImage(uuid=self.node1.uuid)
    self.node1_img.volumes = \
      ["/".join(disk.logical_id) for disk in self.drbd_inst.disks[0].children]
    self.node_imgs = {
      self.master_uuid: self.master_img,
      self.node1.uuid: self.node1_img
    }
    # Healthy disk status for every disk of the running instance
    self.diskstatus = {
      self.master_uuid: [
        (True, objects.BlockDevStatus(ldisk_status=constants.LDS_OKAY))
        for _ in self.running_inst.disks
      ]
    }

  @withLockedLU
  def testDisklessInst(self, lu):
    # No disks means nothing to verify
    lu._VerifyInstance(self.diskless_inst, self.node_imgs, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testOfflineNode(self, lu):
    # An offline primary suppresses checks for a stopped instance
    self.master_img.offline = True
    lu._VerifyInstance(self.drbd_inst, self.node_imgs, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testRunningOnOfflineNode(self, lu):
    # But a *running* instance on an offline node is an error
    self.master_img.offline = True
    lu._VerifyInstance(self.running_inst, self.node_imgs, {})
    self.mcpu.assertLogContainsRegex(
      "instance is marked as running and lives on offline node")

  @withLockedLU
  def testMissingVolume(self, lu):
    self.master_img.volumes = []
    lu._VerifyInstance(self.running_inst, self.node_imgs, {})
    self.mcpu.assertLogContainsRegex("volume .* missing")

  @withLockedLU
  def testRunningInstanceOnWrongNode(self, lu):
    self.master_img.instances = []
    self.diskless_inst.admin_state = constants.ADMINST_UP
    lu._VerifyInstance(self.running_inst, self.node_imgs, {})
    self.mcpu.assertLogContainsRegex("instance not running on its primary node")

  @withLockedLU
  def testRunningInstanceOnRightNode(self, lu):
    self.master_img.instances = [self.running_inst.uuid]
    lu._VerifyInstance(self.running_inst, self.node_imgs, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testValidDiskStatus(self, lu):
    lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testDegradedDiskStatus(self, lu):
    self.diskstatus[self.master_uuid][0][1].is_degraded = True
    lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex("instance .* is degraded")

  @withLockedLU
  def testNotOkayDiskStatus(self, lu):
    self.diskstatus[self.master_uuid][0][1].ldisk_status = constants.LDS_FAULTY
    lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex("instance .* state is 'faulty'")

  @withLockedLU
  def testExclusiveStorageWithInvalidInstance(self, lu):
    # DRBD is incompatible with exclusive storage
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    lu._VerifyInstance(self.drbd_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex(
      "instance has template drbd, which is not supported")

  @withLockedLU
  def testExclusiveStorageWithValidInstance(self, lu):
    # With spindles configured, a plain instance is acceptable
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    self.running_inst.disks[0].spindles = 1
    lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testDrbdInTwoGroups(self, lu):
    group = self.cfg.AddNewNodeGroup()
    self.node1.group = group.uuid
    lu._VerifyInstance(self.drbd_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex(
      "instance has primary and secondary nodes in different groups")

  @withLockedLU
  def testOfflineSecondary(self, lu):
    self.node1_img.offline = True
    lu._VerifyInstance(self.drbd_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex("instance has offline secondary node\(s\)")
1531 |
class TestLUClusterVerifyGroupVerifyOrphanVolumes(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyOrphanVolumes."""

  @withLockedLU
  def testOrphanedVolume(self, lu):
    """Only volumes that are neither expected nor reserved are reported.

    disk_0 is expected, disk_2 is reserved, so only disk_1 is orphaned.

    """
    img = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    img.volumes = ["mock_vg/disk_0", "mock_vg/disk_1", "mock_vg/disk_2"]
    expected = {
      self.master_uuid: ["mock_vg/disk_0"]
    }

    lu._VerifyOrphanVolumes(expected, {self.master_uuid: img},
                            utils.FieldSet("mock_vg/disk_2"))
    self.mcpu.assertLogContainsRegex("volume mock_vg/disk_1 is unknown")
    self.mcpu.assertLogDoesNotContainRegex("volume mock_vg/disk_0 is unknown")
    self.mcpu.assertLogDoesNotContainRegex("volume mock_vg/disk_2 is unknown")
1551 |
class TestLUClusterVerifyGroupVerifyNPlusOneMemory(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNPlusOneMemory."""

  @withLockedLU
  def testN1Failure(self, lu):
    # node1 would have to absorb three instances from the master on
    # failover; with mfree unset that cannot fit, so N+1 must fail.
    # node2 is in another group and node3 is offline: both are skipped.
    group1 = self.cfg.AddNewNodeGroup()

    node1 = self.cfg.AddNewNode()
    node2 = self.cfg.AddNewNode(group=group1)
    node3 = self.cfg.AddNewNode()

    inst1 = self.cfg.AddNewInstance()
    inst2 = self.cfg.AddNewInstance()
    inst3 = self.cfg.AddNewInstance()

    node1_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=node1.uuid)
    # sbp: secondary-by-primary map, i.e. instances node1 would inherit
    node1_img.sbp = {
      self.master_uuid: [inst1.uuid, inst2.uuid, inst3.uuid]
    }

    node2_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=node2.uuid)

    node3_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=node3.uuid)
    node3_img.offline = True

    node_imgs = {
      node1.uuid: node1_img,
      node2.uuid: node2_img,
      node3.uuid: node3_img
    }

    lu._VerifyNPlusOneMemory(node_imgs, self.cfg.GetAllInstancesInfo())
    self.mcpu.assertLogContainsRegex(
      "not enough memory to accomodate instance failovers")

    # With enough free memory on node1 the same check passes
    self.mcpu.ClearLogMessages()
    node1_img.mfree = 1000
    lu._VerifyNPlusOneMemory(node_imgs, self.cfg.GetAllInstancesInfo())
    self.mcpu.assertLogIsEmpty()
1591 |
class TestLUClusterVerifyGroupVerifyFiles(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyFiles."""

  @withLockedLU
  def test(self, lu):
    """Exercises all file-consistency complaints in one run.

    Five extra nodes with differing roles return deliberately
    inconsistent NV_FILELIST checksum maps; every expected error message
    is then checked against the log.

    """
    # node1/node3/node4: vm-capable non-candidates; node2: master
    # candidate, not vm-capable; node5: offline.
    node1 = self.cfg.AddNewNode(master_candidate=False, offline=False,
                                vm_capable=True)
    node2 = self.cfg.AddNewNode(master_candidate=True, vm_capable=False)
    node3 = self.cfg.AddNewNode(master_candidate=False, offline=False,
                                vm_capable=True)
    node4 = self.cfg.AddNewNode(master_candidate=False, offline=False,
                                vm_capable=True)
    node5 = self.cfg.AddNewNode(master_candidate=False, offline=True)

    nodeinfo = [self.master, node1, node2, node3, node4, node5]
    # File groups handed to _VerifyFiles; by their suffixes presumably:
    # all nodes / optional / master candidates / vm-capable nodes.
    files_all = set([
      pathutils.CLUSTER_DOMAIN_SECRET_FILE,
      pathutils.RAPI_CERT_FILE,
      pathutils.RAPI_USERS_FILE,
      ])
    files_opt = set([
      pathutils.RAPI_USERS_FILE,
      hv_xen.XL_CONFIG_FILE,
      pathutils.VNC_PASSWORD_FILE,
      ])
    files_mc = set([
      pathutils.CLUSTER_CONF_FILE,
      ])
    files_vm = set([
      hv_xen.XEND_CONFIG_FILE,
      hv_xen.XL_CONFIG_FILE,
      pathutils.VNC_PASSWORD_FILE,
      ])
    # Per-node checksum answers; node4 returns no file data at all and
    # node5 is offline.
    nvinfo = RpcResultsBuilder() \
      .AddSuccessfulNode(self.master, {
        constants.NV_FILELIST: {
          pathutils.CLUSTER_CONF_FILE: "82314f897f38b35f9dab2f7c6b1593e0",
          pathutils.RAPI_CERT_FILE: "babbce8f387bc082228e544a2146fee4",
          pathutils.CLUSTER_DOMAIN_SECRET_FILE: "cds-47b5b3f19202936bb4",
          hv_xen.XEND_CONFIG_FILE: "b4a8a824ab3cac3d88839a9adeadf310",
          hv_xen.XL_CONFIG_FILE: "77935cee92afd26d162f9e525e3d49b9"
        }}) \
      .AddSuccessfulNode(node1, {
        constants.NV_FILELIST: {
          pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
          hv_xen.XEND_CONFIG_FILE: "b4a8a824ab3cac3d88839a9adeadf310",
        }
      }) \
      .AddSuccessfulNode(node2, {
        constants.NV_FILELIST: {
          pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
          pathutils.CLUSTER_DOMAIN_SECRET_FILE: "cds-47b5b3f19202936bb4",
        }
      }) \
      .AddSuccessfulNode(node3, {
        constants.NV_FILELIST: {
          pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
          pathutils.CLUSTER_CONF_FILE: "conf-a6d4b13e407867f7a7b4f0f232a8f527",
          pathutils.CLUSTER_DOMAIN_SECRET_FILE: "cds-47b5b3f19202936bb4",
          pathutils.RAPI_USERS_FILE: "rapiusers-ea3271e8d810ef3",
          hv_xen.XL_CONFIG_FILE: "77935cee92afd26d162f9e525e3d49b9"
        }
      }) \
      .AddSuccessfulNode(node4, {}) \
      .AddOfflineNode(node5) \
      .Build()
    # Sanity check: the mock results cover exactly the nodes under test.
    assert set(nvinfo.keys()) == set(map(operator.attrgetter("uuid"), nodeinfo))

    lu._VerifyFiles(nodeinfo, self.master_uuid, nvinfo,
                    (files_all, files_opt, files_mc, files_vm))

    # One message per inconsistency; the last entry covers node4's
    # missing checksum data.
    expected_msgs = [
      "File %s found with 2 different checksums (variant 1 on"
      " %s, %s, %s; variant 2 on %s)" %
      (pathutils.RAPI_CERT_FILE, node1.name, node2.name, node3.name,
       self.master.name),
      "File %s is missing from node(s) %s" %
      (pathutils.CLUSTER_DOMAIN_SECRET_FILE, node1.name),
      "File %s should not exist on node(s) %s" %
      (pathutils.CLUSTER_CONF_FILE, node3.name),
      "File %s is missing from node(s) %s" %
      (hv_xen.XEND_CONFIG_FILE, node3.name),
      "File %s is missing from node(s) %s" %
      (pathutils.CLUSTER_CONF_FILE, node2.name),
      "File %s found with 2 different checksums (variant 1 on"
      " %s; variant 2 on %s)" %
      (pathutils.CLUSTER_CONF_FILE, self.master.name, node3.name),
      "File %s is optional, but it must exist on all or no nodes (not"
      " found on %s, %s, %s)" %
      (pathutils.RAPI_USERS_FILE, self.master.name, node1.name, node2.name),
      "File %s is optional, but it must exist on all or no nodes (not"
      " found on %s)" % (hv_xen.XL_CONFIG_FILE, node1.name),
      "Node did not return file checksum data",
      ]

    self.assertEqual(len(self.mcpu.GetLogMessages()), len(expected_msgs))
    for expected_msg in expected_msgs:
      self.mcpu.assertLogContainsInLine(expected_msg)
1689 |
class TestLUClusterVerifyGroupVerifyNodeDrbd(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeDrbd."""

  def setUp(self):
    super(TestLUClusterVerifyGroupVerifyNodeDrbd, self).setUp()

    self.node1 = self.cfg.AddNewNode()
    self.node2 = self.cfg.AddNewNode()
    # A running instance with one DRBD disk mirrored node1 <-> node2.
    drbd_disk = self.cfg.CreateDisk(dev_type=constants.DT_DRBD8,
                                    primary_node=self.node1,
                                    secondary_node=self.node2)
    self.inst = self.cfg.AddNewInstance(disks=[drbd_disk],
                                        admin_state=constants.ADMINST_UP)

  @withLockedLU
  def testNoDrbdHelper(self, lu):
    """Without a configured helper, an empty answer is accepted."""
    lu._VerifyNodeDrbd(self.master, {}, self.cfg.GetAllInstancesInfo(), None,
                       self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testDrbdHelperInvalidNodeResult(self, lu):
    """Each malformed helper answer yields its own complaint."""
    cases = [
      ({}, "no drbd usermode helper returned"),
      ({constants.NV_DRBDHELPER: (False, "")},
       "drbd usermode helper check unsuccessful"),
      ({constants.NV_DRBDHELPER: (True, "/bin/false")},
       "wrong drbd usermode helper"),
      ]
    for node_data, expected_regex in cases:
      self.mcpu.ClearLogMessages()
      lu._VerifyNodeDrbd(self.master, node_data,
                         self.cfg.GetAllInstancesInfo(), "/bin/true",
                         self.cfg.ComputeDRBDMap())
      self.mcpu.assertLogContainsRegex(expected_regex)

  @withLockedLU
  def testNoNodeResult(self, lu):
    """A node reporting nothing is missing its assigned minor."""
    lu._VerifyNodeDrbd(self.node1, {}, self.cfg.GetAllInstancesInfo(),
                       None, self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogContainsRegex("drbd minor 1 of .* is not active")

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    """A non-list DRBD status cannot be parsed."""
    lu._VerifyNodeDrbd(self.node1, {constants.NV_DRBDLIST: ""},
                       self.cfg.GetAllInstancesInfo(), None,
                       self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogContainsRegex("cannot parse drbd status file")

  @withLockedLU
  def testWrongMinorInUse(self, lu):
    """Reporting the wrong minor flags both the missing and the extra one."""
    lu._VerifyNodeDrbd(self.node1, {constants.NV_DRBDLIST: [2]},
                       self.cfg.GetAllInstancesInfo(), None,
                       self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogContainsRegex("drbd minor 1 of .* is not active")
    self.mcpu.assertLogContainsRegex("unallocated drbd minor 2 is in use")

  @withLockedLU
  def testValidResult(self, lu):
    """Reporting exactly the expected minor passes silently."""
    lu._VerifyNodeDrbd(self.node1, {constants.NV_DRBDLIST: [1]},
                       self.cfg.GetAllInstancesInfo(), None,
                       self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogIsEmpty()
1748 |
class TestLUClusterVerifyGroupVerifyNodeOs(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._UpdateNodeOS and ._VerifyNodeOS."""

  @withLockedLU
  def testUpdateNodeOsInvalidNodeResult(self, lu):
    """Each malformed NV_OSLIST payload must be rejected."""
    # Malformed in different ways: missing key, wrong type, wrong entry
    # type, wrong entry arity.
    for ndata in [{}, {constants.NV_OSLIST: ""}, {constants.NV_OSLIST: [""]},
                  {constants.NV_OSLIST: [["1", "2"]]}]:
      self.mcpu.ClearLogMessages()
      nimage = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
      lu._UpdateNodeOS(self.master, ndata, nimage)
      self.mcpu.assertLogContainsRegex("node hasn't returned valid OS data")

  @withLockedLU
  def testUpdateNodeOsValidNodeResult(self, lu):
    """A well-formed OS list is accepted without log messages."""
    # Entry layout appears to be [name, path, status, diagnose, variants,
    # parameters, api_versions] — TODO confirm against the OS-list RPC.
    ndata = {
      constants.NV_OSLIST: [
        ["mock_OS", "/mocked/path", True, "", ["default"], [],
         [constants.OS_API_V20]],
        ["Another_Mock", "/random", True, "", ["var1", "var2"],
         [{"param1": "val1"}, {"param2": "val2"}], constants.OS_API_VERSIONS]
      ]
    }
    nimage = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    lu._UpdateNodeOS(self.master, ndata, nimage)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testVerifyNodeOs(self, lu):
    """A node's OS inventory is diffed against the reference node;
    every mismatch must produce exactly one log message.

    """
    node = self.cfg.AddNewNode()
    # nimg_root is the reference (master) image, nimg the node under test.
    nimg_root = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=node.uuid)

    # oslist values: (path, status, diagnose, variants, parameters,
    # api_versions) per entry.
    nimg_root.os_fail = False
    nimg_root.oslist = {
      "mock_os": [("/mocked/path", True, "", set(["default"]), set(),
                   set([constants.OS_API_V20]))],
      "broken_base_os": [("/broken", False, "", set(), set(),
                          set([constants.OS_API_V20]))],
      "only_on_root": [("/random", True, "", set(), set(), set())],
      "diffing_os": [("/pinky", True, "", set(["var1", "var2"]),
                      set([("param1", "val1"), ("param2", "val2")]),
                      set([constants.OS_API_V20]))]
    }
    nimg.os_fail = False
    nimg.oslist = {
      # Identical on both nodes: must not be reported.
      "mock_os": [("/mocked/path", True, "", set(["default"]), set(),
                   set([constants.OS_API_V20]))],
      "only_on_test": [("/random", True, "", set(), set(), set())],
      # Differs from the root node in variants, parameters and API level.
      "diffing_os": [("/bunny", True, "", set(["var1", "var3"]),
                      set([("param1", "val1"), ("param3", "val3")]),
                      set([constants.OS_API_V15]))],
      "broken_os": [("/broken", False, "", set(), set(),
                     set([constants.OS_API_V20]))],
      "multi_entries": [
        ("/multi1", True, "", set(), set(), set([constants.OS_API_V20])),
        ("/multi2", True, "", set(), set(), set([constants.OS_API_V20]))]
    }

    lu._VerifyNodeOS(node, nimg, nimg_root)

    expected_msgs = [
      "Extra OS only_on_test not present on reference node",
      "OSes present on reference node .* but missing on this node:" +
        " only_on_root",
      "OS API version for diffing_os differs",
      "OS variants list for diffing_os differs",
      "OS parameters for diffing_os differs",
      "Invalid OS broken_os",
      "Extra OS broken_os not present on reference node",
      "OS 'multi_entries' has multiple entries",
      "Extra OS multi_entries not present on reference node"
    ]

    # Exact count first, so no unexpected extra messages slip through.
    self.assertEqual(len(expected_msgs), len(self.mcpu.GetLogMessages()))
    for expected_msg in expected_msgs:
      self.mcpu.assertLogContainsRegex(expected_msg)
1824 |
class TestLUClusterVerifyGroupVerifyAcceptedFileStoragePaths(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyAcceptedFileStoragePaths."""

  @withLockedLU
  def testNotMaster(self, lu):
    """A non-master node returning no path data is fine."""
    lu._VerifyAcceptedFileStoragePaths(self.master, {}, False)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testNotMasterButRetunedValue(self, lu):
    """A non-master node must not return path data at all."""
    # NOTE(review): "Retuned" in the method name is a typo for
    # "Returned"; kept as-is to avoid touching the test's identity.
    ndata = {constants.NV_ACCEPTED_STORAGE_PATHS: []}
    lu._VerifyAcceptedFileStoragePaths(self.master, ndata, False)
    self.mcpu.assertLogContainsRegex(
      "Node should not have returned forbidden file storage paths")

  @withLockedLU
  def testMasterInvalidNodeResult(self, lu):
    """The master is required to report its forbidden paths."""
    lu._VerifyAcceptedFileStoragePaths(self.master, {}, True)
    self.mcpu.assertLogContainsRegex(
      "Node did not return forbidden file storage paths")

  @withLockedLU
  def testMasterForbiddenPaths(self, lu):
    """A non-empty forbidden-path list from the master is reported."""
    ndata = {constants.NV_ACCEPTED_STORAGE_PATHS: ["/forbidden"]}
    lu._VerifyAcceptedFileStoragePaths(self.master, ndata, True)
    self.mcpu.assertLogContainsRegex("Found forbidden file storage paths")

  @withLockedLU
  def testMasterSuccess(self, lu):
    """An empty forbidden-path list from the master passes."""
    ndata = {constants.NV_ACCEPTED_STORAGE_PATHS: []}
    lu._VerifyAcceptedFileStoragePaths(self.master, ndata, True)
    self.mcpu.assertLogIsEmpty()
1857 |
class TestLUClusterVerifyGroupVerifyStoragePaths(
    TestLUClusterVerifyGroupMethods):
  """Tests for the file / sharedfile storage path verification helpers."""

  @withLockedLU
  def testVerifyFileStoragePathsSuccess(self, lu):
    """No reported problem means no log output."""
    lu._VerifyFileStoragePaths(self.master, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testVerifyFileStoragePathsFailure(self, lu):
    """A reported unusable file storage path is logged."""
    ndata = {constants.NV_FILE_STORAGE_PATH: "/fail/path"}
    lu._VerifyFileStoragePaths(self.master, ndata)
    self.mcpu.assertLogContainsRegex(
      "The configured file storage path is unusable")

  @withLockedLU
  def testVerifySharedFileStoragePathsSuccess(self, lu):
    """No reported problem means no log output."""
    lu._VerifySharedFileStoragePaths(self.master, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testVerifySharedFileStoragePathsFailure(self, lu):
    """A reported unusable sharedfile storage path is logged."""
    ndata = {constants.NV_SHARED_FILE_STORAGE_PATH: "/fail/path"}
    lu._VerifySharedFileStoragePaths(self.master, ndata)
    self.mcpu.assertLogContainsRegex(
      "The configured sharedfile storage path is unusable")
1884 |
class TestLUClusterVerifyGroupVerifyOob(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyOob."""

  @withLockedLU
  def testEmptyResult(self, lu):
    """No OOB path data: nothing is logged."""
    lu._VerifyOob(self.master, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testErrorResults(self, lu):
    """Every reported OOB path problem shows up in the log."""
    failing_paths = ["path1", "path2"]
    lu._VerifyOob(self.master, {constants.NV_OOB_PATHS: failing_paths})
    for path in failing_paths:
      self.mcpu.assertLogContainsRegex(path)
1897 |
class TestLUClusterVerifyGroupUpdateNodeVolumes(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._UpdateNodeVolumes."""

  def setUp(self):
    super(TestLUClusterVerifyGroupUpdateNodeVolumes, self).setUp()
    self.nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)

  @withLockedLU
  def testNoVgName(self, lu):
    """Without a volume group name the image is flagged, but silently."""
    lu._UpdateNodeVolumes(self.master, {}, self.nimg, None)
    self.mcpu.assertLogIsEmpty()
    self.assertTrue(self.nimg.lvm_fail)

  @withLockedLU
  def testErrorMessage(self, lu):
    """A string result is treated as an LVM error message."""
    ndata = {constants.NV_LVLIST: "mock error"}
    lu._UpdateNodeVolumes(self.master, ndata, self.nimg, "mock_vg")
    self.mcpu.assertLogContainsRegex("LVM problem on node: mock error")
    self.assertTrue(self.nimg.lvm_fail)

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    """A result of the wrong type is reported as a failed RPC."""
    ndata = {constants.NV_LVLIST: [1, 2, 3]}
    lu._UpdateNodeVolumes(self.master, ndata, self.nimg, "mock_vg")
    self.mcpu.assertLogContainsRegex("rpc call to node failed")
    self.assertTrue(self.nimg.lvm_fail)

  @withLockedLU
  def testValidNodeResult(self, lu):
    """A dict result is accepted and the failure flag stays clear."""
    lu._UpdateNodeVolumes(self.master, {constants.NV_LVLIST: {}},
                          self.nimg, "mock_vg")
    self.mcpu.assertLogIsEmpty()
    self.assertFalse(self.nimg.lvm_fail)
1931 |
class TestLUClusterVerifyGroupUpdateNodeInstances(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._UpdateNodeInstances."""

  def setUp(self):
    super(TestLUClusterVerifyGroupUpdateNodeInstances, self).setUp()
    self.nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    """A payload without an instance list is reported as a failed RPC."""
    lu._UpdateNodeInstances(self.master, {}, self.nimg)
    self.mcpu.assertLogContainsRegex("rpc call to node failed")

  @withLockedLU
  def testValidNodeResult(self, lu):
    """A list naming a known instance is accepted silently."""
    inst = self.cfg.AddNewInstance()
    ndata = {constants.NV_INSTANCELIST: [inst.name]}
    lu._UpdateNodeInstances(self.master, ndata, self.nimg)
    self.mcpu.assertLogIsEmpty()
1951 |
class TestLUClusterVerifyGroupUpdateNodeInfo(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._UpdateNodeInfo."""

  def setUp(self):
    super(TestLUClusterVerifyGroupUpdateNodeInfo, self).setUp()
    self.nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    # Minimal well-formed hypervisor answer, reused by several tests.
    self.valid_hvresult = {constants.NV_HVINFO: {"memory_free": 1024}}

  @withLockedLU
  def testInvalidHvNodeResult(self, lu):
    """Missing or malformed hypervisor info counts as a failed RPC."""
    for hvdata in ({}, {constants.NV_HVINFO: ""}):
      self.mcpu.ClearLogMessages()
      lu._UpdateNodeInfo(self.master, hvdata, self.nimg, None)
      self.mcpu.assertLogContainsRegex("rpc call to node failed")

  @withLockedLU
  def testInvalidMemoryFreeHvNodeResult(self, lu):
    """A non-numeric memory_free value is rejected."""
    bad_result = {constants.NV_HVINFO: {"memory_free": "abc"}}
    lu._UpdateNodeInfo(self.master, bad_result, self.nimg, None)
    self.mcpu.assertLogContainsRegex(
      "node returned invalid nodeinfo, check hypervisor")

  @withLockedLU
  def testValidHvNodeResult(self, lu):
    """A well-formed hypervisor answer passes without messages."""
    lu._UpdateNodeInfo(self.master, self.valid_hvresult, self.nimg, None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testInvalidVgNodeResult(self, lu):
    """A VG list of the wrong type is reported for the volume group."""
    for vgdata in ([], ""):
      self.mcpu.ClearLogMessages()
      ndata = dict(self.valid_hvresult)
      ndata[constants.NV_VGLIST] = vgdata
      lu._UpdateNodeInfo(self.master, ndata, self.nimg, "mock_vg")
      self.mcpu.assertLogContainsRegex(
        "node didn't return data for the volume group 'mock_vg'")

  @withLockedLU
  def testInvalidDiskFreeVgNodeResult(self, lu):
    """A non-numeric free-space value for the VG is rejected."""
    self.valid_hvresult[constants.NV_VGLIST] = {"mock_vg": "abc"}
    lu._UpdateNodeInfo(self.master, self.valid_hvresult, self.nimg, "mock_vg")
    self.mcpu.assertLogContainsRegex(
      "node returned invalid LVM info, check LVM status")

  @withLockedLU
  def testValidVgNodeResult(self, lu):
    """Valid hypervisor and VG data pass without messages."""
    self.valid_hvresult[constants.NV_VGLIST] = {"mock_vg": 10000}
    lu._UpdateNodeInfo(self.master, self.valid_hvresult, self.nimg, "mock_vg")
    self.mcpu.assertLogIsEmpty()
2005 |
class TestLUClusterVerifyGroupCollectDiskInfo(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._CollectDiskInfo."""

  def setUp(self):
    super(TestLUClusterVerifyGroupCollectDiskInfo, self).setUp()

    self.node1 = self.cfg.AddNewNode()
    self.node2 = self.cfg.AddNewNode()
    self.node3 = self.cfg.AddNewNode()

    # One instance per disk layout: diskless on node1, plain on node2,
    # DRBD with primary node3 / secondary node2.
    self.diskless_inst = \
      self.cfg.AddNewInstance(primary_node=self.node1,
                              disk_template=constants.DT_DISKLESS)
    self.plain_inst = \
      self.cfg.AddNewInstance(primary_node=self.node2,
                              disk_template=constants.DT_PLAIN)
    self.drbd_inst = \
      self.cfg.AddNewInstance(primary_node=self.node3,
                              secondary_node=self.node2,
                              disk_template=constants.DT_DRBD8)

    # Node images mirroring the instance placement above (pinst/sinst:
    # primary/secondary instance uuid lists).
    self.node1_img = cluster.LUClusterVerifyGroup.NodeImage(
      uuid=self.node1.uuid)
    self.node1_img.pinst = [self.diskless_inst.uuid]
    self.node1_img.sinst = []
    self.node2_img = cluster.LUClusterVerifyGroup.NodeImage(
      uuid=self.node2.uuid)
    self.node2_img.pinst = [self.plain_inst.uuid]
    self.node2_img.sinst = [self.drbd_inst.uuid]
    self.node3_img = cluster.LUClusterVerifyGroup.NodeImage(
      uuid=self.node3.uuid)
    self.node3_img.pinst = [self.drbd_inst.uuid]
    self.node3_img.sinst = []

    self.node_images = {
      self.node1.uuid: self.node1_img,
      self.node2.uuid: self.node2_img,
      self.node3.uuid: self.node3_img
    }

    self.node_uuids = [self.node1.uuid, self.node2.uuid, self.node3.uuid]

  @withLockedLU
  def testSuccessfulRun(self, lu):
    """Healthy mirror status from every disk-bearing node: no messages."""
    # node2 has two disks to report (plain + DRBD secondary), node3 one;
    # node1 is diskless and thus absent from the mock result.
    self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.node2, [(True, ""), (True, "")]) \
        .AddSuccessfulNode(self.node3, [(True, "")]) \
        .Build()

    lu._CollectDiskInfo(self.node_uuids, self.node_images,
                        self.cfg.GetAllInstancesInfo())

    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testOfflineAndFailingNodes(self, lu):
    """Offline and failed RPC answers are reported."""
    self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
      RpcResultsBuilder() \
        .AddOfflineNode(self.node2) \
        .AddFailedNode(self.node3) \
        .Build()

    lu._CollectDiskInfo(self.node_uuids, self.node_images,
                        self.cfg.GetAllInstancesInfo())

    self.mcpu.assertLogContainsRegex("while getting disk information")

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    """Malformed per-disk tuples must not crash the collection."""
    self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.node2, [(True,), (False,)]) \
        .AddSuccessfulNode(self.node3, [""]) \
        .Build()

    lu._CollectDiskInfo(self.node_uuids, self.node_images,
                        self.cfg.GetAllInstancesInfo())
    # logging is not performed through mcpu
    self.mcpu.assertLogIsEmpty()
2085 |
class TestLUClusterVerifyGroupHooksCallBack(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup.HooksCallBack."""

  def setUp(self):
    super(TestLUClusterVerifyGroupHooksCallBack, self).setUp()

    # Feedback is irrelevant here; the tests inspect mcpu's log instead.
    self.feedback_fn = lambda _: None

  def PrepareLU(self, lu):
    super(TestLUClusterVerifyGroupHooksCallBack, self).PrepareLU(lu)

    lu.my_node_uuids = list(self.cfg.GetAllNodesInfo().keys())

  def _CallHooks(self, lu, hooks_results):
    # All tests invoke the post-phase callback the same way.
    lu.HooksCallBack(constants.HOOKS_PHASE_POST, hooks_results,
                     self.feedback_fn, None)

  @withLockedLU
  def testEmptyGroup(self, lu):
    """An empty node group with no results must not raise."""
    lu.my_node_uuids = []
    self._CallHooks(lu, None)

  @withLockedLU
  def testFailedResult(self, lu):
    """An RPC failure is reported as a hooks communication failure."""
    results = RpcResultsBuilder(use_node_names=True) \
      .AddFailedNode(self.master) \
      .Build()
    self._CallHooks(lu, results)
    self.mcpu.assertLogContainsRegex("Communication failure in hooks execution")

  @withLockedLU
  def testOfflineNode(self, lu):
    """An offline-node result is processed without raising."""
    results = RpcResultsBuilder(use_node_names=True) \
      .AddOfflineNode(self.master) \
      .Build()
    self._CallHooks(lu, results)

  @withLockedLU
  def testValidResult(self, lu):
    """A successful hook script is processed without raising."""
    results = RpcResultsBuilder(use_node_names=True) \
      .AddSuccessfulNode(self.master, [("mock_script",
                                        constants.HKR_SUCCESS,
                                        "mock output")]) \
      .Build()
    self._CallHooks(lu, results)

  @withLockedLU
  def testFailedScriptResult(self, lu):
    """A failing hook script is reported by name."""
    results = RpcResultsBuilder(use_node_names=True) \
      .AddSuccessfulNode(self.master, [("mock_script",
                                        constants.HKR_FAIL,
                                        "mock output")]) \
      .Build()
    self._CallHooks(lu, results)
    self.mcpu.assertLogContainsRegex("Script mock_script failed")
2144 |
class TestLUClusterVerifyDisks(CmdlibTestCase):
  """Tests for LUClusterVerifyDisks."""

  def testVerifyDisks(self):
    """The opcode must submit exactly one child verification job."""
    result = self.ExecOpCode(opcodes.OpClusterVerifyDisks())

    self.assertEqual(1, len(result["jobs"]))
2152 |
if __name__ == "__main__":
  # Run all test cases in this module through Ganeti's test runner.
  testutils.GanetiTestProgram()