root / test / py / cmdlib / cluster_unittest.py @ aa14fb0a
History | View | Annotate | Download (81.3 kB)
1 |
#!/usr/bin/python
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2008, 2011, 2012, 2013 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
|
22 |
"""Tests for LUCluster*
|
23 |
|
24 |
"""
|
25 |
|
26 |
import OpenSSL |
27 |
|
28 |
import copy |
29 |
import unittest |
30 |
import operator |
31 |
|
32 |
from ganeti.cmdlib import cluster |
33 |
from ganeti import constants |
34 |
from ganeti import errors |
35 |
from ganeti import netutils |
36 |
from ganeti import objects |
37 |
from ganeti import opcodes |
38 |
from ganeti import utils |
39 |
from ganeti import pathutils |
40 |
from ganeti import query |
41 |
from ganeti.hypervisor import hv_xen |
42 |
|
43 |
from testsupport import * |
44 |
|
45 |
import testutils |
46 |
|
47 |
|
48 |
class TestClusterVerifySsh(unittest.TestCase):
  """Tests for LUClusterVerifyGroup._SelectSshCheckNodes."""

  def testMultipleGroups(self):
    select_fn = cluster.LUClusterVerifyGroup._SelectSshCheckNodes
    # Nodes of the group under test; only node26 is offline.
    group_nodes = [
      objects.Node(name="node%s" % i, group="my", offline=(i == 26))
      for i in range(20, 27)
      ]
    # Nodes of all the other groups, in the original declaration order
    # (the order matters for the per-host check-node selection below).
    other_specs = [
      ("node1", "g1", True),
      ("node2", "g1", False),
      ("node3", "g1", False),
      ("node4", "g1", True),
      ("node5", "g1", False),
      ("node10", "xyz", False),
      ("node11", "xyz", False),
      ("node40", "alloff", True),
      ("node41", "alloff", True),
      ("node50", "aaa", False),
      ]
    all_nodes = [objects.Node(name=name, group=group, offline=offline)
                 for (name, group, offline) in other_specs] + group_nodes
    assert not utils.FindDuplicates([node.name for node in all_nodes])

    (online, perhost) = select_fn(group_nodes, "my", all_nodes)
    self.assertEqual(online, ["node%s" % i for i in range(20, 26)])
    self.assertEqual(set(perhost.keys()), set(online))

    self.assertEqual(perhost, {
      "node20": ["node10", "node2", "node50"],
      "node21": ["node11", "node3", "node50"],
      "node22": ["node10", "node5", "node50"],
      "node23": ["node11", "node2", "node50"],
      "node24": ["node10", "node3", "node50"],
      "node25": ["node11", "node5", "node50"],
      })

  def testSingleGroup(self):
    select_fn = cluster.LUClusterVerifyGroup._SelectSshCheckNodes
    nodes = [objects.Node(name=name, group="default", offline=offline)
             for (name, offline) in [("node1", True),
                                     ("node2", False),
                                     ("node3", False),
                                     ("node4", True)]]
    assert not utils.FindDuplicates([node.name for node in nodes])

    (online, perhost) = select_fn(nodes, "default", nodes)
    self.assertEqual(online, ["node2", "node3"])
    self.assertEqual(set(perhost.keys()), set(online))

    # With a single group there are no foreign nodes to check against.
    self.assertEqual(perhost, {
      "node2": [],
      "node3": [],
      })
106 |
|
107 |
class TestLUClusterActivateMasterIp(CmdlibTestCase):
  """Tests for LUClusterActivateMasterIp."""

  def testSuccess(self):
    # The master node reports success for the master-IP activation RPC.
    self.rpc.call_node_activate_master_ip.return_value = \
      self.RpcResultsBuilder().CreateSuccessfulNodeResult(self.master)

    self.ExecOpCode(opcodes.OpClusterActivateMasterIp())

    self.rpc.call_node_activate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)

  def testFailure(self):
    # A failing RPC result must surface as an OpExecError.
    self.rpc.call_node_activate_master_ip.return_value = \
      self.RpcResultsBuilder().CreateFailedNodeResult(self.master)

    self.ExecOpCodeExpectOpExecError(opcodes.OpClusterActivateMasterIp())
129 |
|
130 |
class TestLUClusterDeactivateMasterIp(CmdlibTestCase):
  """Tests for LUClusterDeactivateMasterIp."""

  def testSuccess(self):
    # The master node reports success for the master-IP deactivation RPC.
    self.rpc.call_node_deactivate_master_ip.return_value = \
      self.RpcResultsBuilder().CreateSuccessfulNodeResult(self.master)

    self.ExecOpCode(opcodes.OpClusterDeactivateMasterIp())

    self.rpc.call_node_deactivate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)

  def testFailure(self):
    # A failing RPC result must surface as an OpExecError.
    self.rpc.call_node_deactivate_master_ip.return_value = \
      self.RpcResultsBuilder().CreateFailedNodeResult(self.master)

    self.ExecOpCodeExpectOpExecError(opcodes.OpClusterDeactivateMasterIp())
152 |
|
153 |
class TestLUClusterConfigQuery(CmdlibTestCase):
  """Tests for LUClusterConfigQuery."""

  def testInvalidField(self):
    # Unknown fields must be rejected during the prerequisite check.
    op = opcodes.OpClusterConfigQuery(output_fields=["pinky_bunny"])

    self.ExecOpCodeExpectOpPrereqError(op, "pinky_bunny")

  def testAllFields(self):
    op = opcodes.OpClusterConfigQuery(output_fields=query.CLUSTER_FIELDS.keys())

    self.rpc.call_get_watcher_pause.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, -1)

    ret = self.ExecOpCode(op)

    # Querying every field triggers exactly one watcher-pause RPC and
    # yields one value per known cluster field.
    self.assertEqual(1, self.rpc.call_get_watcher_pause.call_count)
    self.assertEqual(len(ret), len(query.CLUSTER_FIELDS))

  def testEmptyFields(self):
    # Fixed method-name typo ("testEmpytFields" -> "testEmptyFields");
    # the "test" prefix is preserved, so discovery is unaffected.
    op = opcodes.OpClusterConfigQuery(output_fields=[])

    self.ExecOpCode(op)

    # With no fields requested, no RPC should be made at all.
    self.assertFalse(self.rpc.call_get_watcher_pause.called)
178 |
|
179 |
class TestLUClusterDestroy(CmdlibTestCase):
  """Tests for LUClusterDestroy."""

  # The expected-error arguments are regex patterns, so "\(" needs a real
  # backslash; raw strings are used because "\(" in a plain string literal
  # is an invalid escape sequence (deprecated since Python 3.6, W605).

  def testExistingNodes(self):
    op = opcodes.OpClusterDestroy()

    self.cfg.AddNewNode()
    self.cfg.AddNewNode()

    self.ExecOpCodeExpectOpPrereqError(op, r"still 2 node\(s\)")

  def testExistingInstances(self):
    op = opcodes.OpClusterDestroy()

    self.cfg.AddNewInstance()
    self.cfg.AddNewInstance()

    self.ExecOpCodeExpectOpPrereqError(op, r"still 2 instance\(s\)")

  def testEmptyCluster(self):
    op = opcodes.OpClusterDestroy()

    self.ExecOpCode(op)

    # Destroying an empty cluster runs the post-phase hook on the master.
    self.assertSingleHooksCall([self.master.name],
                               "cluster-destroy",
                               constants.HOOKS_PHASE_POST)
205 |
|
206 |
class TestLUClusterPostInit(CmdlibTestCase):
  """Tests for LUClusterPostInit."""

  @testutils.patch_object(cluster, "_UpdateMasterClientCert")
  def testExecution(self, update_client_cert_mock):
    # The client certificate creation is mocked out, as it is tested
    # separately.
    update_client_cert_mock.return_value = None

    # For the purpose of this test, every node hands out the same fake
    # certificate digest.
    def _FakeCryptoTokens(node_uuid, _):
      return self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(
          node_uuid,
          [(constants.CRYPTO_TYPE_SSL_DIGEST, "IA:MA:FA:KE:DI:GE:ST")])

    self.rpc.call_node_crypto_tokens = _FakeCryptoTokens

    self.ExecOpCode(opcodes.OpClusterPostInit())

    self.assertSingleHooksCall([self.master.name],
                               "cluster-init",
                               constants.HOOKS_PHASE_POST)
226 |
|
227 |
class TestLUClusterQuery(CmdlibTestCase):
  """Tests for LUClusterQuery."""

  def testSimpleInvocation(self):
    self.ExecOpCode(opcodes.OpClusterQuery())

  def testIPv6Cluster(self):
    # Querying must also work on a cluster whose primary IP family is IPv6.
    self.cluster.primary_ip_family = netutils.IP6Address.family

    self.ExecOpCode(opcodes.OpClusterQuery())
|
240 |
|
241 |
class TestLUClusterRedistConf(CmdlibTestCase):
  """Tests for LUClusterRedistConf."""

  def testSimpleInvocation(self):
    self.ExecOpCode(opcodes.OpClusterRedistConf())
|
247 |
|
248 |
class TestLUClusterRename(CmdlibTestCase):
  """Tests for LUClusterRename."""

  NEW_NAME = "new-name.example.com"
  NEW_IP = "203.0.113.100"

  def _MockNewHostname(self):
    # Make name resolution return the new cluster name and IP.
    self.netutils_mod.GetHostname.return_value = \
      HostnameMock(self.NEW_NAME, self.NEW_IP)

  def testNoChanges(self):
    # Renaming to the current name is refused.
    op = opcodes.OpClusterRename(name=self.cfg.GetClusterName())

    self.ExecOpCodeExpectOpPrereqError(op, "name nor the IP address")

  def testReachableIp(self):
    op = opcodes.OpClusterRename(name=self.NEW_NAME)

    self._MockNewHostname()
    # The new IP already answers pings, so the rename must be refused.
    self.netutils_mod.TcpPing.return_value = True

    self.ExecOpCodeExpectOpPrereqError(op, "is reachable on the network")

  def testValidRename(self):
    op = opcodes.OpClusterRename(name=self.NEW_NAME)

    self._MockNewHostname()

    self.ExecOpCode(op)

    # A successful rename rewrites the known-hosts file and bounces the
    # master IP (deactivate, then activate).
    self.assertEqual(1, self.ssh_mod.WriteKnownHostsFile.call_count)
    self.rpc.call_node_deactivate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)
    self.rpc.call_node_activate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)

  def testRenameOfflineMaster(self):
    op = opcodes.OpClusterRename(name=self.NEW_NAME)

    self.master.offline = True
    self._MockNewHostname()

    self.ExecOpCode(op)
|
289 |
|
290 |
class TestLUClusterRepairDiskSizes(CmdlibTestCase):
  """Tests for LUClusterRepairDiskSizes."""

  def testNoInstances(self):
    self.ExecOpCode(opcodes.OpClusterRepairDiskSizes())

  def _SetUpInstanceSingleDisk(self, dev_type=constants.DT_PLAIN):
    # Create one instance with a single disk of the given template,
    # using the master as primary node and a fresh node as secondary.
    primary = self.master
    secondary = self.cfg.AddNewNode()

    disk = self.cfg.CreateDisk(dev_type=dev_type,
                               primary_node=primary,
                               secondary_node=secondary)
    instance = self.cfg.AddNewInstance(disks=[disk])

    return (instance, disk)

  def testSingleInstanceOnFailingNode(self):
    (instance, _) = self._SetUpInstanceSingleDisk()
    op = opcodes.OpClusterRepairDiskSizes(instances=[instance.name])

    self.rpc.call_blockdev_getdimensions.return_value = \
      self.RpcResultsBuilder().CreateFailedNodeResult(self.master)

    self.ExecOpCode(op)

    self.mcpu.assertLogContainsRegex("Failure in blockdev_getdimensions")

  def _ExecOpClusterRepairDiskSizes(self, node_data):
    # Not specifying any instances repairs all of them.
    self.rpc.call_blockdev_getdimensions.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, node_data)

    return self.ExecOpCode(opcodes.OpClusterRepairDiskSizes())

  def testInvalidResultData(self):
    # Malformed dimension results are skipped with a log message.
    for data in [[], [None], ["invalid"], [("still", "invalid")]]:
      self.ResetMocks()

      self._SetUpInstanceSingleDisk()
      self._ExecOpClusterRepairDiskSizes(data)

      self.mcpu.assertLogContainsRegex("ignoring")

  def testCorrectSize(self):
    self._SetUpInstanceSingleDisk()
    changed = self._ExecOpClusterRepairDiskSizes([(1024 ** 3, None)])
    self.mcpu.assertLogIsEmpty()
    self.assertEqual(0, len(changed))

  def testWrongSize(self):
    self._SetUpInstanceSingleDisk()
    changed = self._ExecOpClusterRepairDiskSizes([(512 * 1024 * 1024, None)])
    self.assertEqual(1, len(changed))

  def testCorrectDRBD(self):
    self._SetUpInstanceSingleDisk(dev_type=constants.DT_DRBD8)
    changed = self._ExecOpClusterRepairDiskSizes([(1024 ** 3, None)])
    self.mcpu.assertLogIsEmpty()
    self.assertEqual(0, len(changed))

  def testWrongDRBDChild(self):
    (_, disk) = self._SetUpInstanceSingleDisk(dev_type=constants.DT_DRBD8)
    disk.children[0].size = 512
    changed = self._ExecOpClusterRepairDiskSizes([(1024 ** 3, None)])
    self.assertEqual(1, len(changed))

  def testExclusiveStorageInvalidResultData(self):
    # With exclusive storage the node must also report spindles.
    self._SetUpInstanceSingleDisk()
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    self._ExecOpClusterRepairDiskSizes([(1024 ** 3, None)])

    self.mcpu.assertLogContainsRegex(
      "did not return valid spindles information")

  def testExclusiveStorageCorrectSpindles(self):
    (_, disk) = self._SetUpInstanceSingleDisk()
    disk.spindles = 1
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    changed = self._ExecOpClusterRepairDiskSizes([(1024 ** 3, 1)])
    self.assertEqual(0, len(changed))

  def testExclusiveStorageWrongSpindles(self):
    # The disk has no spindles recorded, so the reported value differs.
    self._SetUpInstanceSingleDisk()
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    changed = self._ExecOpClusterRepairDiskSizes([(1024 ** 3, 1)])
    self.assertEqual(1, len(changed))
382 |
|
383 |
class TestLUClusterSetParams(CmdlibTestCase): |
384 |
UID_POOL = [(10, 1000)] |
385 |
|
386 |
def testUidPool(self): |
387 |
op = opcodes.OpClusterSetParams(uid_pool=self.UID_POOL)
|
388 |
self.ExecOpCode(op)
|
389 |
self.assertEqual(self.UID_POOL, self.cluster.uid_pool) |
390 |
|
391 |
def testAddUids(self): |
392 |
old_pool = [(1, 9)] |
393 |
self.cluster.uid_pool = list(old_pool) |
394 |
op = opcodes.OpClusterSetParams(add_uids=self.UID_POOL)
|
395 |
self.ExecOpCode(op)
|
396 |
self.assertEqual(set(self.UID_POOL + old_pool), |
397 |
set(self.cluster.uid_pool)) |
398 |
|
399 |
def testRemoveUids(self): |
400 |
additional_pool = [(1, 9)] |
401 |
self.cluster.uid_pool = self.UID_POOL + additional_pool |
402 |
op = opcodes.OpClusterSetParams(remove_uids=self.UID_POOL)
|
403 |
self.ExecOpCode(op)
|
404 |
self.assertEqual(additional_pool, self.cluster.uid_pool) |
405 |
|
406 |
def testMacPrefix(self): |
407 |
mac_prefix = "aa:01:02"
|
408 |
op = opcodes.OpClusterSetParams(mac_prefix=mac_prefix) |
409 |
self.ExecOpCode(op)
|
410 |
self.assertEqual(mac_prefix, self.cluster.mac_prefix) |
411 |
|
412 |
def testEmptyMacPrefix(self): |
413 |
mac_prefix = ""
|
414 |
op = opcodes.OpClusterSetParams(mac_prefix=mac_prefix) |
415 |
self.ExecOpCodeExpectOpPrereqError(
|
416 |
op, "Parameter 'OP_CLUSTER_SET_PARAMS.mac_prefix' fails validation")
|
417 |
|
418 |
def testInvalidMacPrefix(self): |
419 |
mac_prefix = "az:00:00"
|
420 |
op = opcodes.OpClusterSetParams(mac_prefix=mac_prefix) |
421 |
self.ExecOpCodeExpectOpPrereqError(op, "Invalid MAC address prefix") |
422 |
|
423 |
def testMasterNetmask(self): |
424 |
op = opcodes.OpClusterSetParams(master_netmask=26)
|
425 |
self.ExecOpCode(op)
|
426 |
self.assertEqual(26, self.cluster.master_netmask) |
427 |
|
428 |
def testInvalidDiskparams(self): |
429 |
for diskparams in [{constants.DT_DISKLESS: {constants.LV_STRIPES: 0}}, |
430 |
{constants.DT_DRBD8: {constants.RBD_POOL: "pool"}},
|
431 |
{constants.DT_DRBD8: {constants.RBD_ACCESS: "bunny"}}]:
|
432 |
self.ResetMocks()
|
433 |
op = opcodes.OpClusterSetParams(diskparams=diskparams) |
434 |
self.ExecOpCodeExpectOpPrereqError(op, "verify diskparams") |
435 |
|
436 |
def testValidDiskparams(self): |
437 |
diskparams = {constants.DT_RBD: {constants.RBD_POOL: "mock_pool",
|
438 |
constants.RBD_ACCESS: "kernelspace"}}
|
439 |
op = opcodes.OpClusterSetParams(diskparams=diskparams) |
440 |
self.ExecOpCode(op)
|
441 |
self.assertEqual(diskparams[constants.DT_RBD],
|
442 |
self.cluster.diskparams[constants.DT_RBD])
|
443 |
|
444 |
def testMinimalDiskparams(self): |
445 |
diskparams = {constants.DT_RBD: {constants.RBD_POOL: "mock_pool"}}
|
446 |
self.cluster.diskparams = {}
|
447 |
op = opcodes.OpClusterSetParams(diskparams=diskparams) |
448 |
self.ExecOpCode(op)
|
449 |
self.assertEqual(diskparams, self.cluster.diskparams) |
450 |
|
451 |
def testValidDiskparamsAccess(self): |
452 |
for value in constants.DISK_VALID_ACCESS_MODES: |
453 |
self.ResetMocks()
|
454 |
op = opcodes.OpClusterSetParams(diskparams={ |
455 |
constants.DT_RBD: {constants.RBD_ACCESS: value} |
456 |
}) |
457 |
self.ExecOpCode(op)
|
458 |
got = self.cluster.diskparams[constants.DT_RBD][constants.RBD_ACCESS]
|
459 |
self.assertEqual(value, got)
|
460 |
|
461 |
def testInvalidDiskparamsAccess(self): |
462 |
for value in ["default", "pinky_bunny"]: |
463 |
self.ResetMocks()
|
464 |
op = opcodes.OpClusterSetParams(diskparams={ |
465 |
constants.DT_RBD: {constants.RBD_ACCESS: value} |
466 |
}) |
467 |
self.ExecOpCodeExpectOpPrereqError(op, "Invalid value of 'rbd:access'") |
468 |
|
469 |
def testUnsetDrbdHelperWithDrbdDisks(self): |
470 |
self.cfg.AddNewInstance(disks=[
|
471 |
self.cfg.CreateDisk(dev_type=constants.DT_DRBD8, create_nodes=True)]) |
472 |
op = opcodes.OpClusterSetParams(drbd_helper="")
|
473 |
self.ExecOpCodeExpectOpPrereqError(op, "Cannot disable drbd helper") |
474 |
|
475 |
def testFileStorageDir(self): |
476 |
op = opcodes.OpClusterSetParams(file_storage_dir="/random/path")
|
477 |
self.ExecOpCode(op)
|
478 |
|
479 |
def testSetFileStorageDirToCurrentValue(self): |
480 |
op = opcodes.OpClusterSetParams( |
481 |
file_storage_dir=self.cluster.file_storage_dir)
|
482 |
self.ExecOpCode(op)
|
483 |
|
484 |
self.mcpu.assertLogContainsRegex("file storage dir already set to value") |
485 |
|
486 |
def testUnsetFileStorageDirFileStorageEnabled(self): |
487 |
self.cfg.SetEnabledDiskTemplates([constants.DT_FILE])
|
488 |
op = opcodes.OpClusterSetParams(file_storage_dir='')
|
489 |
self.ExecOpCodeExpectOpPrereqError(op, "Unsetting the 'file' storage") |
490 |
|
491 |
def testUnsetFileStorageDirFileStorageDisabled(self): |
492 |
self.cfg.SetEnabledDiskTemplates([constants.DT_PLAIN])
|
493 |
op = opcodes.OpClusterSetParams(file_storage_dir='')
|
494 |
self.ExecOpCode(op)
|
495 |
|
496 |
def testSetFileStorageDirFileStorageDisabled(self): |
497 |
self.cfg.SetEnabledDiskTemplates([constants.DT_PLAIN])
|
498 |
op = opcodes.OpClusterSetParams(file_storage_dir='/some/path/')
|
499 |
self.ExecOpCode(op)
|
500 |
self.mcpu.assertLogContainsRegex("although file storage is not enabled") |
501 |
|
502 |
def testValidDrbdHelper(self): |
503 |
node1 = self.cfg.AddNewNode()
|
504 |
node1.offline = True
|
505 |
self.rpc.call_drbd_helper.return_value = \
|
506 |
self.RpcResultsBuilder() \
|
507 |
.AddSuccessfulNode(self.master, "/bin/true") \ |
508 |
.AddOfflineNode(node1) \ |
509 |
.Build() |
510 |
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
|
511 |
self.ExecOpCode(op)
|
512 |
self.mcpu.assertLogContainsRegex("Not checking drbd helper on offline node") |
513 |
|
514 |
def testDrbdHelperFailingNode(self): |
515 |
self.rpc.call_drbd_helper.return_value = \
|
516 |
self.RpcResultsBuilder() \
|
517 |
.AddFailedNode(self.master) \
|
518 |
.Build() |
519 |
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
|
520 |
self.ExecOpCodeExpectOpPrereqError(op, "Error checking drbd helper") |
521 |
|
522 |
def testInvalidDrbdHelper(self): |
523 |
self.rpc.call_drbd_helper.return_value = \
|
524 |
self.RpcResultsBuilder() \
|
525 |
.AddSuccessfulNode(self.master, "/bin/false") \ |
526 |
.Build() |
527 |
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
|
528 |
self.ExecOpCodeExpectOpPrereqError(op, "drbd helper is /bin/false") |
529 |
|
530 |
def testDrbdHelperWithoutDrbdDiskTemplate(self): |
531 |
drbd_helper = "/bin/random_helper"
|
532 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
533 |
self.rpc.call_drbd_helper.return_value = \
|
534 |
self.RpcResultsBuilder() \
|
535 |
.AddSuccessfulNode(self.master, drbd_helper) \
|
536 |
.Build() |
537 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
538 |
self.ExecOpCode(op)
|
539 |
|
540 |
self.mcpu.assertLogContainsRegex("but did not enable") |
541 |
|
542 |
def testResetDrbdHelperDrbdDisabled(self): |
543 |
drbd_helper = ""
|
544 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
545 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
546 |
self.ExecOpCode(op)
|
547 |
|
548 |
self.assertEqual(None, self.cluster.drbd_usermode_helper) |
549 |
|
550 |
def testResetDrbdHelperDrbdEnabled(self): |
551 |
drbd_helper = ""
|
552 |
self.cluster.enabled_disk_templates = [constants.DT_DRBD8]
|
553 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
554 |
self.ExecOpCodeExpectOpPrereqError(
|
555 |
op, "Cannot disable drbd helper while DRBD is enabled.")
|
556 |
|
557 |
def testEnableDrbdNoHelper(self): |
558 |
self.cluster.enabled_disk_templates = [constants.DT_DISKLESS]
|
559 |
self.cluster.drbd_usermode_helper = None |
560 |
enabled_disk_templates = [constants.DT_DRBD8] |
561 |
op = opcodes.OpClusterSetParams( |
562 |
enabled_disk_templates=enabled_disk_templates) |
563 |
self.ExecOpCodeExpectOpPrereqError(
|
564 |
op, "Cannot enable DRBD without a DRBD usermode helper set")
|
565 |
|
566 |
def testEnableDrbdHelperSet(self): |
567 |
drbd_helper = "/bin/random_helper"
|
568 |
self.rpc.call_drbd_helper.return_value = \
|
569 |
self.RpcResultsBuilder() \
|
570 |
.AddSuccessfulNode(self.master, drbd_helper) \
|
571 |
.Build() |
572 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
573 |
self.cluster.drbd_usermode_helper = drbd_helper
|
574 |
enabled_disk_templates = [constants.DT_DRBD8] |
575 |
op = opcodes.OpClusterSetParams( |
576 |
enabled_disk_templates=enabled_disk_templates, |
577 |
ipolicy={constants.IPOLICY_DTS: enabled_disk_templates}) |
578 |
self.ExecOpCode(op)
|
579 |
|
580 |
self.assertEqual(drbd_helper, self.cluster.drbd_usermode_helper) |
581 |
|
582 |
def testDrbdHelperAlreadySet(self): |
583 |
drbd_helper = "/bin/true"
|
584 |
self.rpc.call_drbd_helper.return_value = \
|
585 |
self.RpcResultsBuilder() \
|
586 |
.AddSuccessfulNode(self.master, "/bin/true") \ |
587 |
.Build() |
588 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
|
589 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
590 |
self.ExecOpCode(op)
|
591 |
|
592 |
self.assertEqual(drbd_helper, self.cluster.drbd_usermode_helper) |
593 |
self.mcpu.assertLogContainsRegex("DRBD helper already in desired state") |
594 |
|
595 |
def testSetDrbdHelper(self): |
596 |
drbd_helper = "/bin/true"
|
597 |
self.rpc.call_drbd_helper.return_value = \
|
598 |
self.RpcResultsBuilder() \
|
599 |
.AddSuccessfulNode(self.master, "/bin/true") \ |
600 |
.Build() |
601 |
self.cluster.drbd_usermode_helper = "/bin/false" |
602 |
self.cfg.SetEnabledDiskTemplates([constants.DT_DRBD8])
|
603 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
604 |
self.ExecOpCode(op)
|
605 |
|
606 |
self.assertEqual(drbd_helper, self.cluster.drbd_usermode_helper) |
607 |
|
608 |
def testBeparams(self): |
609 |
beparams = {constants.BE_VCPUS: 32}
|
610 |
op = opcodes.OpClusterSetParams(beparams=beparams) |
611 |
self.ExecOpCode(op)
|
612 |
self.assertEqual(32, self.cluster |
613 |
.beparams[constants.PP_DEFAULT][constants.BE_VCPUS]) |
614 |
|
615 |
def testNdparams(self): |
616 |
ndparams = {constants.ND_EXCLUSIVE_STORAGE: True}
|
617 |
op = opcodes.OpClusterSetParams(ndparams=ndparams) |
618 |
self.ExecOpCode(op)
|
619 |
self.assertEqual(True, self.cluster |
620 |
.ndparams[constants.ND_EXCLUSIVE_STORAGE]) |
621 |
|
622 |
def testNdparamsResetOobProgram(self): |
623 |
ndparams = {constants.ND_OOB_PROGRAM: ""}
|
624 |
op = opcodes.OpClusterSetParams(ndparams=ndparams) |
625 |
self.ExecOpCode(op)
|
626 |
self.assertEqual(constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM],
|
627 |
self.cluster.ndparams[constants.ND_OOB_PROGRAM])
|
628 |
|
629 |
def testHvState(self): |
630 |
hv_state = {constants.HT_FAKE: {constants.HVST_CPU_TOTAL: 8}}
|
631 |
op = opcodes.OpClusterSetParams(hv_state=hv_state) |
632 |
self.ExecOpCode(op)
|
633 |
self.assertEqual(8, self.cluster.hv_state_static |
634 |
[constants.HT_FAKE][constants.HVST_CPU_TOTAL]) |
635 |
|
636 |
def testDiskState(self): |
637 |
disk_state = { |
638 |
constants.DT_PLAIN: { |
639 |
"mock_vg": {constants.DS_DISK_TOTAL: 10} |
640 |
} |
641 |
} |
642 |
op = opcodes.OpClusterSetParams(disk_state=disk_state) |
643 |
self.ExecOpCode(op)
|
644 |
self.assertEqual(10, self.cluster |
645 |
.disk_state_static[constants.DT_PLAIN]["mock_vg"]
|
646 |
[constants.DS_DISK_TOTAL]) |
647 |
|
648 |
def testDefaultIPolicy(self): |
649 |
ipolicy = constants.IPOLICY_DEFAULTS |
650 |
op = opcodes.OpClusterSetParams(ipolicy=ipolicy) |
651 |
self.ExecOpCode(op)
|
652 |
|
653 |
def testIPolicyNewViolation(self): |
654 |
import ganeti.constants as C |
655 |
ipolicy = C.IPOLICY_DEFAULTS |
656 |
ipolicy[C.ISPECS_MINMAX][0][C.ISPECS_MIN][C.ISPEC_MEM_SIZE] = 128 |
657 |
ipolicy[C.ISPECS_MINMAX][0][C.ISPECS_MAX][C.ISPEC_MEM_SIZE] = 128 |
658 |
|
659 |
self.cfg.AddNewInstance(beparams={C.BE_MINMEM: 512, C.BE_MAXMEM: 512}) |
660 |
op = opcodes.OpClusterSetParams(ipolicy=ipolicy) |
661 |
self.ExecOpCode(op)
|
662 |
|
663 |
self.mcpu.assertLogContainsRegex("instances violate them") |
664 |
|
665 |
def testNicparamsNoInstance(self): |
666 |
nicparams = { |
667 |
constants.NIC_LINK: "mock_bridge"
|
668 |
} |
669 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
670 |
self.ExecOpCode(op)
|
671 |
|
672 |
self.assertEqual("mock_bridge", |
673 |
self.cluster.nicparams
|
674 |
[constants.PP_DEFAULT][constants.NIC_LINK]) |
675 |
|
676 |
def testNicparamsInvalidConf(self): |
677 |
nicparams = { |
678 |
constants.NIC_MODE: constants.NIC_MODE_BRIDGED, |
679 |
constants.NIC_LINK: ""
|
680 |
} |
681 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
682 |
self.ExecOpCodeExpectException(op, errors.ConfigurationError, "NIC link") |
683 |
|
684 |
def testNicparamsInvalidInstanceConf(self): |
685 |
nicparams = { |
686 |
constants.NIC_MODE: constants.NIC_MODE_BRIDGED, |
687 |
constants.NIC_LINK: "mock_bridge"
|
688 |
} |
689 |
self.cfg.AddNewInstance(nics=[
|
690 |
self.cfg.CreateNic(nicparams={constants.NIC_LINK: None})]) |
691 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
692 |
self.ExecOpCodeExpectOpPrereqError(op, "Missing bridged NIC link") |
693 |
|
694 |
def testNicparamsMissingIp(self): |
695 |
nicparams = { |
696 |
constants.NIC_MODE: constants.NIC_MODE_ROUTED |
697 |
} |
698 |
self.cfg.AddNewInstance()
|
699 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
700 |
self.ExecOpCodeExpectOpPrereqError(op, "routed NIC with no ip address") |
701 |
|
702 |
def testNicparamsWithInstance(self): |
703 |
nicparams = { |
704 |
constants.NIC_LINK: "mock_bridge"
|
705 |
} |
706 |
self.cfg.AddNewInstance()
|
707 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
708 |
self.ExecOpCode(op)
|
709 |
|
710 |
def testDefaultHvparams(self): |
711 |
hvparams = constants.HVC_DEFAULTS |
712 |
op = opcodes.OpClusterSetParams(hvparams=hvparams) |
713 |
self.ExecOpCode(op)
|
714 |
|
715 |
self.assertEqual(hvparams, self.cluster.hvparams) |
716 |
|
717 |
def testMinimalHvparams(self): |
718 |
hvparams = { |
719 |
constants.HT_FAKE: { |
720 |
constants.HV_MIGRATION_MODE: constants.HT_MIGRATION_NONLIVE |
721 |
} |
722 |
} |
723 |
self.cluster.hvparams = {}
|
724 |
op = opcodes.OpClusterSetParams(hvparams=hvparams) |
725 |
self.ExecOpCode(op)
|
726 |
|
727 |
self.assertEqual(hvparams, self.cluster.hvparams) |
728 |
|
729 |
def testOsHvp(self): |
730 |
os_hvp = { |
731 |
"mocked_os": {
|
732 |
constants.HT_FAKE: { |
733 |
constants.HV_MIGRATION_MODE: constants.HT_MIGRATION_NONLIVE |
734 |
} |
735 |
}, |
736 |
"other_os": constants.HVC_DEFAULTS
|
737 |
} |
738 |
op = opcodes.OpClusterSetParams(os_hvp=os_hvp) |
739 |
self.ExecOpCode(op)
|
740 |
|
741 |
self.assertEqual(constants.HT_MIGRATION_NONLIVE,
|
742 |
self.cluster.os_hvp["mocked_os"][constants.HT_FAKE] |
743 |
[constants.HV_MIGRATION_MODE]) |
744 |
self.assertEqual(constants.HVC_DEFAULTS, self.cluster.os_hvp["other_os"]) |
745 |
|
746 |
def testRemoveOsHvp(self): |
747 |
os_hvp = {"mocked_os": {constants.HT_FAKE: None}} |
748 |
op = opcodes.OpClusterSetParams(os_hvp=os_hvp) |
749 |
self.ExecOpCode(op)
|
750 |
|
751 |
assert constants.HT_FAKE not in self.cluster.os_hvp["mocked_os"] |
752 |
|
753 |
def testDefaultOsHvp(self): |
754 |
os_hvp = {"mocked_os": constants.HVC_DEFAULTS.copy()}
|
755 |
self.cluster.os_hvp = {"mocked_os": {}} |
756 |
op = opcodes.OpClusterSetParams(os_hvp=os_hvp) |
757 |
self.ExecOpCode(op)
|
758 |
|
759 |
self.assertEqual(os_hvp, self.cluster.os_hvp) |
760 |
|
761 |
def testOsparams(self): |
762 |
osparams = { |
763 |
"mocked_os": {
|
764 |
"param1": "value1", |
765 |
"param2": None |
766 |
}, |
767 |
"other_os": {
|
768 |
"param1": None |
769 |
} |
770 |
} |
771 |
self.cluster.osparams = {"other_os": {"param1": "value1"}} |
772 |
self.cluster.osparams_private_cluster = {}
|
773 |
op = opcodes.OpClusterSetParams(osparams=osparams) |
774 |
self.ExecOpCode(op)
|
775 |
|
776 |
self.assertEqual({"mocked_os": {"param1": "value1"}}, self.cluster.osparams) |
777 |
|
778 |
def testEnabledHypervisors(self): |
779 |
enabled_hypervisors = [constants.HT_XEN_HVM, constants.HT_XEN_PVM] |
780 |
op = opcodes.OpClusterSetParams(enabled_hypervisors=enabled_hypervisors) |
781 |
self.ExecOpCode(op)
|
782 |
|
783 |
self.assertEqual(enabled_hypervisors, self.cluster.enabled_hypervisors) |
784 |
|
785 |
def testEnabledHypervisorsWithoutHypervisorParams(self): |
786 |
enabled_hypervisors = [constants.HT_FAKE] |
787 |
self.cluster.hvparams = {}
|
788 |
op = opcodes.OpClusterSetParams(enabled_hypervisors=enabled_hypervisors) |
789 |
self.ExecOpCode(op)
|
790 |
|
791 |
self.assertEqual(enabled_hypervisors, self.cluster.enabled_hypervisors) |
792 |
self.assertEqual(constants.HVC_DEFAULTS[constants.HT_FAKE],
|
793 |
self.cluster.hvparams[constants.HT_FAKE])
|
794 |
|
795 |
@testutils.patch_object(utils, "FindFile") |
796 |
def testValidDefaultIallocator(self, find_file_mock): |
797 |
find_file_mock.return_value = "/random/path"
|
798 |
default_iallocator = "/random/path"
|
799 |
op = opcodes.OpClusterSetParams(default_iallocator=default_iallocator) |
800 |
self.ExecOpCode(op)
|
801 |
|
802 |
self.assertEqual(default_iallocator, self.cluster.default_iallocator) |
803 |
|
804 |
@testutils.patch_object(utils, "FindFile") |
805 |
def testInvalidDefaultIallocator(self, find_file_mock): |
806 |
find_file_mock.return_value = None
|
807 |
default_iallocator = "/random/path"
|
808 |
op = opcodes.OpClusterSetParams(default_iallocator=default_iallocator) |
809 |
self.ExecOpCodeExpectOpPrereqError(op, "Invalid default iallocator script") |
810 |
|
811 |
  # Enabled disk templates are accepted when the ipolicy lists the same set.
  def testEnabledDiskTemplates(self):
    enabled_disk_templates = [constants.DT_DISKLESS, constants.DT_PLAIN]
    op = opcodes.OpClusterSetParams(
      enabled_disk_templates=enabled_disk_templates,
      ipolicy={constants.IPOLICY_DTS: enabled_disk_templates})
    self.ExecOpCode(op)

    self.assertEqual(enabled_disk_templates,
                     self.cluster.enabled_disk_templates)
821 |
  # An ipolicy allowing a template that is not in the enabled set must be
  # rejected at the prerequisite stage.
  def testEnabledDiskTemplatesVsIpolicy(self):
    enabled_disk_templates = [constants.DT_DISKLESS, constants.DT_PLAIN]
    op = opcodes.OpClusterSetParams(
      enabled_disk_templates=enabled_disk_templates,
      ipolicy={constants.IPOLICY_DTS: [constants.DT_FILE]})
    self.ExecOpCodeExpectOpPrereqError(op, "but not enabled on the cluster")
828 |
  # A disk template still used by an existing instance cannot be dropped
  # from the enabled set.
  def testDisablingDiskTemplatesOfInstances(self):
    old_disk_templates = [constants.DT_DISKLESS, constants.DT_PLAIN]
    self.cfg.SetEnabledDiskTemplates(old_disk_templates)
    self.cfg.AddNewInstance(
      disks=[self.cfg.CreateDisk(dev_type=constants.DT_PLAIN)])
    new_disk_templates = [constants.DT_DISKLESS, constants.DT_DRBD8]
    op = opcodes.OpClusterSetParams(
      enabled_disk_templates=new_disk_templates,
      ipolicy={constants.IPOLICY_DTS: new_disk_templates})
    self.ExecOpCodeExpectOpPrereqError(op, "least one instance using it")
839 |
  # Enabling an LVM-based template requires a volume group name to be set.
  def testEnabledDiskTemplatesWithoutVgName(self):
    enabled_disk_templates = [constants.DT_PLAIN]
    self.cluster.volume_group_name = None
    op = opcodes.OpClusterSetParams(
      enabled_disk_templates=enabled_disk_templates)
    self.ExecOpCodeExpectOpPrereqError(op, "specify a volume group")
846 |
  # Disabling the template of an already-existing instance must fail.
  def testDisableDiskTemplateWithExistingInstance(self):
    enabled_disk_templates = [constants.DT_DISKLESS]
    self.cfg.AddNewInstance(
      disks=[self.cfg.CreateDisk(dev_type=constants.DT_PLAIN)])
    op = opcodes.OpClusterSetParams(
      enabled_disk_templates=enabled_disk_templates,
      ipolicy={constants.IPOLICY_DTS: enabled_disk_templates})
    self.ExecOpCodeExpectOpPrereqError(op, "Cannot disable disk template")
855 |
  # Setting a VG name is accepted without warnings even when no LVM disk
  # template is enabled.
  def testVgNameNoLvmDiskTemplateEnabled(self):
    vg_name = "test_vg"
    self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
    op = opcodes.OpClusterSetParams(vg_name=vg_name)
    self.ExecOpCode(op)

    self.assertEqual(vg_name, self.cluster.volume_group_name)
    self.mcpu.assertLogIsEmpty()
864 |
  # The VG name cannot be unset (empty string) while an LVM disk template
  # is still enabled.
  def testUnsetVgNameWithLvmDiskTemplateEnabled(self):
    vg_name = ""
    self.cluster.enabled_disk_templates = [constants.DT_PLAIN]
    op = opcodes.OpClusterSetParams(vg_name=vg_name)
    self.ExecOpCodeExpectOpPrereqError(op, "Cannot unset volume group")
870 |
  # The VG name cannot be unset while an LVM-backed instance exists.
  def testUnsetVgNameWithLvmInstance(self):
    vg_name = ""
    self.cfg.AddNewInstance(
      disks=[self.cfg.CreateDisk(dev_type=constants.DT_PLAIN)])
    op = opcodes.OpClusterSetParams(vg_name=vg_name)
    self.ExecOpCodeExpectOpPrereqError(op, "Cannot unset volume group")
877 |
  # Unsetting the VG name succeeds when no LVM template is enabled; the
  # stored value becomes None rather than the empty string.
  def testUnsetVgNameWithNoLvmDiskTemplateEnabled(self):
    vg_name = ""
    self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
    op = opcodes.OpClusterSetParams(vg_name=vg_name)
    self.ExecOpCode(op)

    self.assertEqual(None, self.cluster.volume_group_name)
885 |
  # Re-setting the currently configured VG name only logs a notice.
  def testVgNameToOldName(self):
    vg_name = self.cluster.volume_group_name
    op = opcodes.OpClusterSetParams(vg_name=vg_name)
    self.ExecOpCode(op)

    self.mcpu.assertLogContainsRegex("already in desired state")
892 |
  # A node failing the vg_list RPC produces a warning, not a hard failure.
  def testVgNameWithFailingNode(self):
    vg_name = "test_vg"
    op = opcodes.OpClusterSetParams(vg_name=vg_name)
    self.rpc.call_vg_list.return_value = \
      self.RpcResultsBuilder() \
        .AddFailedNode(self.master) \
        .Build()
    self.ExecOpCode(op)

    self.mcpu.assertLogContainsRegex("Error while gathering data on node")
903 |
  # A node reporting a sufficiently large VG (1 TiB here, in MiB units —
  # TODO confirm unit against call_vg_list) passes the size check.
  def testVgNameWithValidNode(self):
    vg_name = "test_vg"
    op = opcodes.OpClusterSetParams(vg_name=vg_name)
    self.rpc.call_vg_list.return_value = \
      self.RpcResultsBuilder() \
        .AddSuccessfulNode(self.master, {vg_name: 1024 * 1024}) \
        .Build()
    self.ExecOpCode(op)
912 |
  # A VG below the required minimum size is rejected as a prereq error.
  def testVgNameWithTooSmallNode(self):
    vg_name = "test_vg"
    op = opcodes.OpClusterSetParams(vg_name=vg_name)
    self.rpc.call_vg_list.return_value = \
      self.RpcResultsBuilder() \
        .AddSuccessfulNode(self.master, {vg_name: 1}) \
        .Build()
    self.ExecOpCodeExpectOpPrereqError(op, "too small")
921 |
  # Several independent scalar cluster parameters are stored verbatim and
  # produce no log output.
  def testMiscParameters(self):
    op = opcodes.OpClusterSetParams(candidate_pool_size=123,
                                    maintain_node_health=True,
                                    modify_etc_hosts=True,
                                    prealloc_wipe_disks=True,
                                    reserved_lvs=["/dev/mock_lv"],
                                    use_external_mip_script=True)
    self.ExecOpCode(op)

    self.mcpu.assertLogIsEmpty()
    self.assertEqual(123, self.cluster.candidate_pool_size)
    self.assertEqual(True, self.cluster.maintain_node_health)
    self.assertEqual(True, self.cluster.modify_etc_hosts)
    self.assertEqual(True, self.cluster.prealloc_wipe_disks)
    self.assertEqual(["/dev/mock_lv"], self.cluster.reserved_lvs)
    self.assertEqual(True, self.cluster.use_external_mip_script)
938 |
  # Adding hidden OSes appends new names and warns about duplicates.
  def testAddHiddenOs(self):
    self.cluster.hidden_os = ["hidden1", "hidden2"]
    op = opcodes.OpClusterSetParams(hidden_os=[(constants.DDM_ADD, "hidden2"),
                                               (constants.DDM_ADD, "hidden3")])
    self.ExecOpCode(op)

    self.assertEqual(["hidden1", "hidden2", "hidden3"], self.cluster.hidden_os)
    self.mcpu.assertLogContainsRegex("OS hidden2 already")
947 |
  # Removing blacklisted OSes drops known names and warns about unknown ones.
  def testRemoveBlacklistedOs(self):
    self.cluster.blacklisted_os = ["blisted1", "blisted2"]
    op = opcodes.OpClusterSetParams(blacklisted_os=[
      (constants.DDM_REMOVE, "blisted2"),
      (constants.DDM_REMOVE, "blisted3")])
    self.ExecOpCode(op)

    self.assertEqual(["blisted1"], self.cluster.blacklisted_os)
    self.mcpu.assertLogContainsRegex("OS blisted3 not found")
957 |
  # The master network device name is stored verbatim.
  def testMasterNetdev(self):
    master_netdev = "test_dev"
    op = opcodes.OpClusterSetParams(master_netdev=master_netdev)
    self.ExecOpCode(op)

    self.assertEqual(master_netdev, self.cluster.master_netdev)
964 |
  # Without force, a failure to deactivate the master IP aborts execution.
  def testMasterNetdevFailNoForce(self):
    master_netdev = "test_dev"
    op = opcodes.OpClusterSetParams(master_netdev=master_netdev)
    self.rpc.call_node_deactivate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)
    self.ExecOpCodeExpectOpExecError(op, "Could not disable the master ip")
972 |
  # With force=True the same failure is only logged and the op completes.
  def testMasterNetdevFailForce(self):
    master_netdev = "test_dev"
    op = opcodes.OpClusterSetParams(master_netdev=master_netdev,
                                    force=True)
    self.rpc.call_node_deactivate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)
    self.ExecOpCode(op)

    self.mcpu.assertLogContainsRegex("Could not disable the master ip")
984 |
class TestLUClusterVerify(CmdlibTestCase):
  """Tests that OpClusterVerify submits one verification job per group.

  """
  def testVerifyAllGroups(self):
    # No group_name: one job per node group (the test config has two).
    op = opcodes.OpClusterVerify()
    result = self.ExecOpCode(op)

    self.assertEqual(2, len(result["jobs"]))

  def testVerifyDefaultGroups(self):
    # Restricting to one group yields exactly one job.
    op = opcodes.OpClusterVerify(group_name="default")
    result = self.ExecOpCode(op)

    self.assertEqual(1, len(result["jobs"]))
998 |
class TestLUClusterVerifyConfig(CmdlibTestCase):
  """Tests for LUClusterVerifyConfig.

  Certificate loading/validation and file access are mocked out in setUp so
  only the configuration consistency checks themselves are exercised.

  """
  def setUp(self):
    super(TestLUClusterVerifyConfig, self).setUp()

    # Patch out certificate handling and file I/O; all mocks report success.
    self._load_cert_patcher = testutils \
      .patch_object(OpenSSL.crypto, "load_certificate")
    self._load_cert_mock = self._load_cert_patcher.start()
    self._verify_cert_patcher = testutils \
      .patch_object(utils, "VerifyCertificate")
    self._verify_cert_mock = self._verify_cert_patcher.start()
    self._read_file_patcher = testutils.patch_object(utils, "ReadFile")
    self._read_file_mock = self._read_file_patcher.start()
    self._can_read_patcher = testutils.patch_object(utils, "CanRead")
    self._can_read_mock = self._can_read_patcher.start()

    self._can_read_mock.return_value = True
    self._read_file_mock.return_value = True
    self._verify_cert_mock.return_value = (None, "")
    self._load_cert_mock.return_value = True

  def tearDown(self):
    super(TestLUClusterVerifyConfig, self).tearDown()

    # Stop patchers in reverse order of creation.
    self._can_read_patcher.stop()
    self._read_file_patcher.stop()
    self._verify_cert_patcher.stop()
    self._load_cert_patcher.stop()

  def testSuccessfulRun(self):
    # A consistent configuration verifies successfully.
    self.cfg.AddNewInstance()
    op = opcodes.OpClusterVerifyConfig()
    result = self.ExecOpCode(op)
    self.assertTrue(result)

  def testDanglingNode(self):
    # A node pointing at a non-existing group must be reported and fail
    # verification.
    node = self.cfg.AddNewNode()
    self.cfg.AddNewInstance(primary_node=node)
    node.group = "invalid"
    op = opcodes.OpClusterVerifyConfig()
    result = self.ExecOpCode(op)

    # Raw string: "\(" / "\)" are regex escapes, not Python string escapes.
    self.mcpu.assertLogContainsRegex(
      r"following nodes \(and their instances\) belong to a non existing group")
    self.assertFalse(result)

  def testDanglingInstance(self):
    # An instance whose primary node does not exist must fail verification.
    inst = self.cfg.AddNewInstance()
    inst.primary_node = "invalid"
    op = opcodes.OpClusterVerifyConfig()
    result = self.ExecOpCode(op)

    self.mcpu.assertLogContainsRegex(
      "following instances have a non-existing primary-node")
    self.assertFalse(result)
1055 |
class TestLUClusterVerifyGroup(CmdlibTestCase):
  """End-to-end invocations of LUClusterVerifyGroup with mocked RPC.

  """
  def testEmptyNodeGroup(self):
    # A group with no nodes is skipped but still verifies successfully.
    group = self.cfg.AddNewNodeGroup()
    op = opcodes.OpClusterVerifyGroup(group_name=group.name, verbose=True)

    result = self.ExecOpCode(op)

    self.assertTrue(result)
    self.mcpu.assertLogContainsRegex("Empty node group, skipping verification")

  def testSimpleInvocation(self):
    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)

    self.ExecOpCode(op)

  def testSimpleInvocationWithInstance(self):
    self.cfg.AddNewInstance(disks=[])
    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)

    self.ExecOpCode(op)

  def testGhostNode(self):
    # A DRBD instance whose secondary lives in another group while both
    # nodes are offline exercises the "ghost node" handling.
    group = self.cfg.AddNewNodeGroup()
    node = self.cfg.AddNewNode(group=group.uuid, offline=True)
    self.master.offline = True
    self.cfg.AddNewInstance(disk_template=constants.DT_DRBD8,
                            primary_node=self.master,
                            secondary_node=node)

    self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
      RpcResultsBuilder() \
        .AddOfflineNode(self.master) \
        .Build()

    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)

    self.ExecOpCode(op)

  def testValidRpcResult(self):
    self.cfg.AddNewInstance(disks=[])

    self.rpc.call_node_verify.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.master, {}) \
        .Build()

    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)

    self.ExecOpCode(op)
1106 |
class TestLUClusterVerifyClientCerts(CmdlibTestCase):
  """Tests for the client-certificate checks of cluster verification.

  Each test seeds cluster.candidate_certs and a mocked node_verify RPC
  result, then checks the warnings emitted by the verification run.

  """
  def _AddNormalNode(self):
    # A non-master-candidate clone of the master node.
    self.normalnode = copy.deepcopy(self.master)
    self.normalnode.master_candidate = False
    self.normalnode.uuid = "normal-node-uuid"
    self.cfg.AddNode(self.normalnode, None)

  def testVerifyMasterCandidate(self):
    # Matching digest in candidate_certs: no warnings expected.
    client_cert = "client-cert-digest"
    self.cluster.candidate_certs = {self.master.uuid: client_cert}
    self.rpc.call_node_verify.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.master,
          {constants.NV_CLIENT_CERT: (None, client_cert)}) \
        .Build()
    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
    self.ExecOpCode(op)

  def testVerifyMasterCandidateInvalid(self):
    # A non-None error code in the RPC result marks the cert as invalid.
    client_cert = "client-cert-digest"
    self.cluster.candidate_certs = {self.master.uuid: client_cert}
    self.rpc.call_node_verify.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.master,
          {constants.NV_CLIENT_CERT: (666, "Invalid Certificate")}) \
        .Build()
    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
    self.ExecOpCode(op)
    self.mcpu.assertLogContainsRegex("Client certificate")
    self.mcpu.assertLogContainsRegex("failed validation")

  def testVerifyNoMasterCandidateMap(self):
    client_cert = "client-cert-digest"
    self.cluster.candidate_certs = {}
    self.rpc.call_node_verify.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.master,
          {constants.NV_CLIENT_CERT: (None, client_cert)}) \
        .Build()
    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
    self.ExecOpCode(op)
    self.mcpu.assertLogContainsRegex(
      "list of master candidate certificates is empty")

  def testVerifyNoSharingMasterCandidates(self):
    # Two candidate entries with the same digest must be flagged.
    client_cert = "client-cert-digest"
    self.cluster.candidate_certs = {
      self.master.uuid: client_cert,
      "some-other-master-candidate-uuid": client_cert}
    self.rpc.call_node_verify.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.master,
          {constants.NV_CLIENT_CERT: (None, client_cert)}) \
        .Build()
    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
    self.ExecOpCode(op)
    self.mcpu.assertLogContainsRegex(
      "two master candidates configured to use the same")

  def testVerifyMasterCandidateCertMismatch(self):
    # Reported digest differs from the stored candidate_certs entry.
    client_cert = "client-cert-digest"
    self.cluster.candidate_certs = {self.master.uuid: "different-cert-digest"}
    self.rpc.call_node_verify.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.master,
          {constants.NV_CLIENT_CERT: (None, client_cert)}) \
        .Build()
    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
    self.ExecOpCode(op)
    self.mcpu.assertLogContainsRegex("does not match its entry")

  def testVerifyMasterCandidateUnregistered(self):
    # The verifying node has no entry in candidate_certs at all.
    client_cert = "client-cert-digest"
    self.cluster.candidate_certs = {"other-node-uuid": "different-cert-digest"}
    self.rpc.call_node_verify.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.master,
          {constants.NV_CLIENT_CERT: (None, client_cert)}) \
        .Build()
    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
    self.ExecOpCode(op)
    self.mcpu.assertLogContainsRegex("does not have an entry")

  def testVerifyMasterCandidateOtherNodesCert(self):
    # The node presents a digest registered for a different node.
    client_cert = "client-cert-digest"
    self.cluster.candidate_certs = {"other-node-uuid": client_cert}
    self.rpc.call_node_verify.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.master,
          {constants.NV_CLIENT_CERT: (None, client_cert)}) \
        .Build()
    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
    self.ExecOpCode(op)
    self.mcpu.assertLogContainsRegex("using a certificate of another node")

  def testNormalNodeStillInList(self):
    # A non-candidate node must not keep an entry in candidate_certs.
    self._AddNormalNode()
    client_cert_master = "client-cert-digest-master"
    client_cert_normal = "client-cert-digest-normal"
    self.cluster.candidate_certs = {
      self.normalnode.uuid: client_cert_normal,
      self.master.uuid: client_cert_master}
    self.rpc.call_node_verify.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.normalnode,
          {constants.NV_CLIENT_CERT: (None, client_cert_normal)}) \
        .AddSuccessfulNode(self.master,
          {constants.NV_CLIENT_CERT: (None, client_cert_master)}) \
        .Build()
    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
    self.ExecOpCode(op)
    self.mcpu.assertLogContainsRegex("not a master candidate")
    self.mcpu.assertLogContainsRegex("still listed")

  def testNormalNodeStealingMasterCandidateCert(self):
    # A non-candidate node presenting a candidate's digest must be flagged.
    self._AddNormalNode()
    client_cert_master = "client-cert-digest-master"
    self.cluster.candidate_certs = {
      self.master.uuid: client_cert_master}
    self.rpc.call_node_verify.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.normalnode,
          {constants.NV_CLIENT_CERT: (None, client_cert_master)}) \
        .AddSuccessfulNode(self.master,
          {constants.NV_CLIENT_CERT: (None, client_cert_master)}) \
        .Build()
    op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
    self.ExecOpCode(op)
    self.mcpu.assertLogContainsRegex("not a master candidate")
    self.mcpu.assertLogContainsRegex(
      "certificate of another node which is master candidate")
1240 |
class TestLUClusterVerifyGroupMethods(CmdlibTestCase):
  """Base class for testing individual methods in LUClusterVerifyGroup.

  """
  def setUp(self):
    super(TestLUClusterVerifyGroupMethods, self).setUp()
    self.op = opcodes.OpClusterVerifyGroup(group_name="default")

  def PrepareLU(self, lu):
    # Pre-seed the LU attributes that _Verify* methods read.
    lu._exclusive_storage = False
    lu.master_node = self.master_uuid
    lu.group_info = self.group
    # NOTE: monkey-patches the *class* attribute so all_node_info reflects
    # this test's mocked configuration.
    cluster.LUClusterVerifyGroup.all_node_info = \
      property(fget=lambda _: self.cfg.GetAllNodesInfo())
1256 |
class TestLUClusterVerifyGroupVerifyNode(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNode.

  """
  @withLockedLU
  def testInvalidNodeResult(self, lu):
    # Non-dict node results are rejected outright.
    self.assertFalse(lu._VerifyNode(self.master, None))
    self.assertFalse(lu._VerifyNode(self.master, ""))

  @withLockedLU
  def testInvalidVersion(self, lu):
    self.assertFalse(lu._VerifyNode(self.master, {"version": None}))
    self.assertFalse(lu._VerifyNode(self.master, {"version": ""}))
    # Protocol version mismatch is fatal.
    self.assertFalse(lu._VerifyNode(self.master, {
      "version": (constants.PROTOCOL_VERSION - 1, constants.RELEASE_VERSION)
    }))

    self.mcpu.ClearLogMessages()
    # Software (release) version mismatch is only a warning.
    self.assertTrue(lu._VerifyNode(self.master, {
      "version": (constants.PROTOCOL_VERSION, constants.RELEASE_VERSION + "x")
    }))
    self.mcpu.assertLogContainsRegex("software version mismatch")

  def _GetValidNodeResult(self, additional_fields):
    # Minimal node result that passes _VerifyNode, extended per test.
    ret = {
      "version": (constants.PROTOCOL_VERSION, constants.RELEASE_VERSION),
      constants.NV_NODESETUP: []
    }
    ret.update(additional_fields)
    return ret

  @withLockedLU
  def testHypervisor(self, lu):
    # A per-hypervisor error string is reported; None means healthy.
    lu._VerifyNode(self.master, self._GetValidNodeResult({
      constants.NV_HYPERVISOR: {
        constants.HT_XEN_PVM: None,
        constants.HT_XEN_HVM: "mock error"
      }
    }))
    self.mcpu.assertLogContainsRegex(constants.HT_XEN_HVM)
    self.mcpu.assertLogContainsRegex("mock error")

  @withLockedLU
  def testHvParams(self, lu):
    lu._VerifyNode(self.master, self._GetValidNodeResult({
      constants.NV_HVPARAMS: [("mock item", constants.HT_XEN_HVM, "mock error")]
    }))
    self.mcpu.assertLogContainsRegex(constants.HT_XEN_HVM)
    self.mcpu.assertLogContainsRegex("mock item")
    self.mcpu.assertLogContainsRegex("mock error")

  @withLockedLU
  def testSuccessfulResult(self, lu):
    self.assertTrue(lu._VerifyNode(self.master, self._GetValidNodeResult({})))
    self.mcpu.assertLogIsEmpty()
1310 |
class TestLUClusterVerifyGroupVerifyNodeTime(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeTime.

  """
  @withLockedLU
  def testInvalidNodeResult(self, lu):
    # Missing or malformed NV_TIME entries are both invalid.
    for ndata in [{}, {constants.NV_TIME: "invalid"}]:
      self.mcpu.ClearLogMessages()
      lu._VerifyNodeTime(self.master, ndata, None, None)

      self.mcpu.assertLogContainsRegex("Node returned invalid time")

  @withLockedLU
  def testNodeDiverges(self, lu):
    # Node times far outside the [1000, 1005] verification window diverge.
    for ntime in [(0, 0), (2000, 0)]:
      self.mcpu.ClearLogMessages()
      lu._VerifyNodeTime(self.master, {constants.NV_TIME: ntime}, 1000, 1005)

      self.mcpu.assertLogContainsRegex("Node time diverges")

  @withLockedLU
  def testSuccessfulResult(self, lu):
    lu._VerifyNodeTime(self.master, {constants.NV_TIME: (0, 0)}, 0, 5)
    self.mcpu.assertLogIsEmpty()
1333 |
class TestLUClusterVerifyGroupUpdateVerifyNodeLVM(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._UpdateVerifyNodeLVM.

  """
  def setUp(self):
    super(TestLUClusterVerifyGroupUpdateVerifyNodeLVM, self).setUp()
    # A node result with one VG and one healthy PV.
    self.VALID_NRESULT = {
      constants.NV_VGLIST: {"mock_vg": 30000},
      constants.NV_PVLIST: [
        {
          "name": "mock_pv",
          "vg_name": "mock_vg",
          "size": 5000,
          "free": 2500,
          "attributes": [],
          "lv_list": []
        }
      ]
    }

  @withLockedLU
  def testNoVgName(self, lu):
    # Without a VG name there is nothing to check.
    lu._UpdateVerifyNodeLVM(self.master, {}, None, None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testEmptyNodeResult(self, lu):
    lu._UpdateVerifyNodeLVM(self.master, {}, "mock_vg", None)
    self.mcpu.assertLogContainsRegex("unable to check volume groups")
    self.mcpu.assertLogContainsRegex("Can't get PV list from node")

  @withLockedLU
  def testValidNodeResult(self, lu):
    lu._UpdateVerifyNodeLVM(self.master, self.VALID_NRESULT, "mock_vg", None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testValidNodeResultExclusiveStorage(self, lu):
    lu._exclusive_storage = True
    lu._UpdateVerifyNodeLVM(self.master, self.VALID_NRESULT, "mock_vg",
                            cluster.LUClusterVerifyGroup.NodeImage())
    self.mcpu.assertLogIsEmpty()
1375 |
class TestLUClusterVerifyGroupVerifyGroupDRBDVersion(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyGroupDRBDVersion.

  """
  @withLockedLU
  def testEmptyNodeResult(self, lu):
    lu._VerifyGroupDRBDVersion({})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testValidNodeResult(self, lu):
    lu._VerifyGroupDRBDVersion(
      RpcResultsBuilder()
        .AddSuccessfulNode(self.master, {
          constants.NV_DRBDVERSION: "8.3.0"
        })
        .Build())
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testDifferentVersions(self, lu):
    # Two nodes with differing DRBD versions: both are reported.
    node1 = self.cfg.AddNewNode()
    lu._VerifyGroupDRBDVersion(
      RpcResultsBuilder()
        .AddSuccessfulNode(self.master, {
          constants.NV_DRBDVERSION: "8.3.0"
        })
        .AddSuccessfulNode(node1, {
          constants.NV_DRBDVERSION: "8.4.0"
        })
        .Build())
    self.mcpu.assertLogContainsRegex("DRBD version mismatch: 8.3.0")
    self.mcpu.assertLogContainsRegex("DRBD version mismatch: 8.4.0")
1408 |
class TestLUClusterVerifyGroupVerifyGroupLVM(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyGroupLVM.

  """
  @withLockedLU
  def testNoVgName(self, lu):
    lu._VerifyGroupLVM(None, None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testNoExclusiveStorage(self, lu):
    # The check only applies with exclusive storage enabled.
    lu._VerifyGroupLVM(None, "mock_vg")
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testNoPvInfo(self, lu):
    lu._exclusive_storage = True
    nimg = cluster.LUClusterVerifyGroup.NodeImage()
    lu._VerifyGroupLVM({self.master.uuid: nimg}, "mock_vg")
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testValidPvInfos(self, lu):
    # Overlapping pv_min/pv_max ranges across nodes produce no warnings.
    lu._exclusive_storage = True
    node2 = self.cfg.AddNewNode()
    nimg1 = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master.uuid)
    nimg1.pv_min = 10000
    nimg1.pv_max = 10010
    nimg2 = cluster.LUClusterVerifyGroup.NodeImage(uuid=node2.uuid)
    nimg2.pv_min = 9998
    nimg2.pv_max = 10005
    lu._VerifyGroupLVM({self.master.uuid: nimg1, node2.uuid: nimg2}, "mock_vg")
    self.mcpu.assertLogIsEmpty()
1440 |
class TestLUClusterVerifyGroupVerifyNodeBridges(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeBridges.

  """
  @withLockedLU
  def testNoBridges(self, lu):
    lu._VerifyNodeBridges(None, None, None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testInvalidBridges(self, lu):
    # Missing or non-list NV_BRIDGES data is invalid.
    for ndata in [{}, {constants.NV_BRIDGES: ""}]:
      self.mcpu.ClearLogMessages()
      lu._VerifyNodeBridges(self.master, ndata, ["mock_bridge"])
      self.mcpu.assertLogContainsRegex("not return valid bridge information")

    # A non-empty NV_BRIDGES list names the bridges that are missing.
    self.mcpu.ClearLogMessages()
    lu._VerifyNodeBridges(self.master, {constants.NV_BRIDGES: ["mock_bridge"]},
                          ["mock_bridge"])
    self.mcpu.assertLogContainsRegex("missing bridge")
1460 |
class TestLUClusterVerifyGroupVerifyNodeUserScripts(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeUserScripts.

  """
  @withLockedLU
  def testNoUserScripts(self, lu):
    lu._VerifyNodeUserScripts(self.master, {})
    self.mcpu.assertLogContainsRegex("did not return user scripts information")

  @withLockedLU
  def testBrokenUserScripts(self, lu):
    # Entries in NV_USERSCRIPTS name scripts reported as broken by the node.
    lu._VerifyNodeUserScripts(self.master,
                              {constants.NV_USERSCRIPTS: ["script"]})
    self.mcpu.assertLogContainsRegex("scripts not present or not executable")
1474 |
class TestLUClusterVerifyGroupVerifyNodeNetwork(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeNetwork.

  """
  def setUp(self):
    super(TestLUClusterVerifyGroupVerifyNodeNetwork, self).setUp()
    # Fully healthy network result; individual tests override single keys.
    self.VALID_NRESULT = {
      constants.NV_NODELIST: {},
      constants.NV_NODENETTEST: {},
      constants.NV_MASTERIP: True
    }

  @withLockedLU
  def testEmptyNodeResult(self, lu):
    # All three sub-checks complain when their data is absent.
    lu._VerifyNodeNetwork(self.master, {})
    self.mcpu.assertLogContainsRegex(
      "node hasn't returned node ssh connectivity data")
    self.mcpu.assertLogContainsRegex(
      "node hasn't returned node tcp connectivity data")
    self.mcpu.assertLogContainsRegex(
      "node hasn't returned node master IP reachability data")

  @withLockedLU
  def testValidResult(self, lu):
    lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testSshProblem(self, lu):
    self.VALID_NRESULT.update({
      constants.NV_NODELIST: {
        "mock_node": "mock_error"
      }
    })
    lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
    self.mcpu.assertLogContainsRegex("ssh communication with node 'mock_node'")

  @withLockedLU
  def testTcpProblem(self, lu):
    self.VALID_NRESULT.update({
      constants.NV_NODENETTEST: {
        "mock_node": "mock_error"
      }
    })
    lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
    self.mcpu.assertLogContainsRegex("tcp communication with node 'mock_node'")

  @withLockedLU
  def testMasterIpNotReachable(self, lu):
    self.VALID_NRESULT.update({
      constants.NV_MASTERIP: False
    })
    node1 = self.cfg.AddNewNode()
    # The message differs depending on whether the failing node is the
    # master itself or another node.
    lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
    self.mcpu.assertLogContainsRegex(
      "the master node cannot reach the master IP")

    self.mcpu.ClearLogMessages()
    lu._VerifyNodeNetwork(node1, self.VALID_NRESULT)
    self.mcpu.assertLogContainsRegex("cannot reach the master IP")
1535 |
class TestLUClusterVerifyGroupVerifyInstance(TestLUClusterVerifyGroupMethods): |
1536 |
def setUp(self): |
1537 |
super(TestLUClusterVerifyGroupVerifyInstance, self).setUp() |
1538 |
|
1539 |
self.node1 = self.cfg.AddNewNode() |
1540 |
self.drbd_inst = self.cfg.AddNewInstance( |
1541 |
disks=[self.cfg.CreateDisk(dev_type=constants.DT_DRBD8,
|
1542 |
primary_node=self.master,
|
1543 |
secondary_node=self.node1)])
|
1544 |
self.running_inst = self.cfg.AddNewInstance( |
1545 |
admin_state=constants.ADMINST_UP, disks_active=True)
|
1546 |
self.diskless_inst = self.cfg.AddNewInstance(disks=[]) |
1547 |
|
1548 |
self.master_img = \
|
1549 |
cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
|
1550 |
self.master_img.volumes = ["/".join(disk.logical_id) |
1551 |
for inst in [self.running_inst, |
1552 |
self.diskless_inst]
|
1553 |
for disk in inst.disks] |
1554 |
self.master_img.volumes.extend(
|
1555 |
["/".join(disk.logical_id) for disk in self.drbd_inst.disks[0].children]) |
1556 |
self.master_img.instances = [self.running_inst.uuid] |
1557 |
self.node1_img = \
|
1558 |
cluster.LUClusterVerifyGroup.NodeImage(uuid=self.node1.uuid)
|
1559 |
self.node1_img.volumes = \
|
1560 |
["/".join(disk.logical_id) for disk in self.drbd_inst.disks[0].children] |
1561 |
self.node_imgs = {
|
1562 |
self.master_uuid: self.master_img, |
1563 |
self.node1.uuid: self.node1_img |
1564 |
} |
1565 |
self.diskstatus = {
|
1566 |
self.master_uuid: [
|
1567 |
(True, objects.BlockDevStatus(ldisk_status=constants.LDS_OKAY))
|
1568 |
for _ in self.running_inst.disks |
1569 |
] |
1570 |
} |
1571 |
|
1572 |
@withLockedLU
|
1573 |
def testDisklessInst(self, lu): |
1574 |
lu._VerifyInstance(self.diskless_inst, self.node_imgs, {}) |
1575 |
self.mcpu.assertLogIsEmpty()
|
1576 |
|
1577 |
@withLockedLU
|
1578 |
def testOfflineNode(self, lu): |
1579 |
self.master_img.offline = True |
1580 |
lu._VerifyInstance(self.drbd_inst, self.node_imgs, {}) |
1581 |
self.mcpu.assertLogIsEmpty()
|
1582 |
|
1583 |
@withLockedLU
|
1584 |
def testRunningOnOfflineNode(self, lu): |
1585 |
self.master_img.offline = True |
1586 |
lu._VerifyInstance(self.running_inst, self.node_imgs, {}) |
1587 |
self.mcpu.assertLogContainsRegex(
|
1588 |
"instance is marked as running and lives on offline node")
|
1589 |
|
1590 |
@withLockedLU
|
1591 |
def testMissingVolume(self, lu): |
1592 |
self.master_img.volumes = []
|
1593 |
lu._VerifyInstance(self.running_inst, self.node_imgs, {}) |
1594 |
self.mcpu.assertLogContainsRegex("volume .* missing") |
1595 |
|
1596 |
@withLockedLU
|
1597 |
def testRunningInstanceOnWrongNode(self, lu): |
1598 |
self.master_img.instances = []
|
1599 |
self.diskless_inst.admin_state = constants.ADMINST_UP
|
1600 |
lu._VerifyInstance(self.running_inst, self.node_imgs, {}) |
1601 |
self.mcpu.assertLogContainsRegex("instance not running on its primary node") |
1602 |
|
1603 |
  @withLockedLU
  def testRunningInstanceOnRightNode(self, lu):
    """A running instance listed on its primary node verifies cleanly."""
    self.master_img.instances = [self.running_inst.uuid]
    lu._VerifyInstance(self.running_inst, self.node_imgs, {})
    self.mcpu.assertLogIsEmpty()
|
1608 |
|
1609 |
  @withLockedLU
  def testValidDiskStatus(self, lu):
    """All-okay disk status (from setUp) produces no log output."""
    lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogIsEmpty()
|
1613 |
|
1614 |
  @withLockedLU
  def testDegradedDiskStatus(self, lu):
    """A degraded disk on the primary node must be reported."""
    self.diskstatus[self.master_uuid][0][1].is_degraded = True
    lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex("instance .* is degraded")
1619 |
|
1620 |
  @withLockedLU
  def testNotOkayDiskStatus(self, lu):
    """A faulty local-disk status must be reported with the state name."""
    self.diskstatus[self.master_uuid][0][1].ldisk_status = constants.LDS_FAULTY
    lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex("instance .* state is 'faulty'")
1625 |
|
1626 |
  @withLockedLU
  def testExclusiveStorageWithInvalidInstance(self, lu):
    """DRBD instances are rejected when exclusive storage is enabled."""
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    lu._VerifyInstance(self.drbd_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex(
      "instance has template drbd, which is not supported")
|
1632 |
|
1633 |
  @withLockedLU
  def testExclusiveStorageWithValidInstance(self, lu):
    """A plain instance with spindles set is valid under exclusive storage."""
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    self.running_inst.disks[0].spindles = 1
    lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogIsEmpty()
|
1639 |
|
1640 |
  @withLockedLU
  def testDrbdInTwoGroups(self, lu):
    """DRBD primary and secondary in different node groups is an error."""
    # Move the secondary node into a freshly created group.
    group = self.cfg.AddNewNodeGroup()
    self.node1.group = group.uuid
    lu._VerifyInstance(self.drbd_inst, self.node_imgs, self.diskstatus)
    self.mcpu.assertLogContainsRegex(
      "instance has primary and secondary nodes in different groups")
|
1647 |
|
1648 |
@withLockedLU
|
1649 |
def testOfflineSecondary(self, lu): |
1650 |
self.node1_img.offline = True |
1651 |
lu._VerifyInstance(self.drbd_inst, self.node_imgs, self.diskstatus) |
1652 |
self.mcpu.assertLogContainsRegex("instance has offline secondary node\(s\)") |
1653 |
|
1654 |
|
1655 |
class TestLUClusterVerifyGroupVerifyOrphanVolumes(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyOrphanVolumes."""

  @withLockedLU
  def testOrphanedVolume(self, lu):
    """Only volumes neither expected nor reserved are reported as unknown.

    disk_0 is expected on the node, disk_2 is in the reserved set, so only
    disk_1 must be flagged.
    """
    master_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    master_img.volumes = ["mock_vg/disk_0", "mock_vg/disk_1", "mock_vg/disk_2"]
    node_imgs = {
      self.master_uuid: master_img
    }
    node_vol_should = {
      self.master_uuid: ["mock_vg/disk_0"]
    }

    lu._VerifyOrphanVolumes(node_vol_should, node_imgs,
                            utils.FieldSet("mock_vg/disk_2"))
    self.mcpu.assertLogContainsRegex("volume mock_vg/disk_1 is unknown")
    self.mcpu.assertLogDoesNotContainRegex("volume mock_vg/disk_0 is unknown")
    self.mcpu.assertLogDoesNotContainRegex("volume mock_vg/disk_2 is unknown")
1673 |
|
1674 |
|
1675 |
class TestLUClusterVerifyGroupVerifyNPlusOneMemory(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNPlusOneMemory."""

  @withLockedLU
  def testN1Failure(self, lu):
    """N+1 check fails with zero free memory and passes once mfree is set.

    node1 would have to absorb three instances from the master on failover;
    with the NodeImage default mfree it cannot, after raising mfree it can.
    """
    group1 = self.cfg.AddNewNodeGroup()

    node1 = self.cfg.AddNewNode()
    node2 = self.cfg.AddNewNode(group=group1)
    node3 = self.cfg.AddNewNode()

    inst1 = self.cfg.AddNewInstance()
    inst2 = self.cfg.AddNewInstance()
    inst3 = self.cfg.AddNewInstance()

    node1_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=node1.uuid)
    # sbp: secondary instances per primary node
    node1_img.sbp = {
      self.master_uuid: [inst1.uuid, inst2.uuid, inst3.uuid]
    }

    node2_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=node2.uuid)

    node3_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=node3.uuid)
    node3_img.offline = True

    node_imgs = {
      node1.uuid: node1_img,
      node2.uuid: node2_img,
      node3.uuid: node3_img
    }

    lu._VerifyNPlusOneMemory(node_imgs, self.cfg.GetAllInstancesInfo())
    # "accomodate" [sic]: must match the (misspelled) production message.
    self.mcpu.assertLogContainsRegex(
      "not enough memory to accomodate instance failovers")

    self.mcpu.ClearLogMessages()
    node1_img.mfree = 1000
    lu._VerifyNPlusOneMemory(node_imgs, self.cfg.GetAllInstancesInfo())
    self.mcpu.assertLogIsEmpty()
|
1713 |
|
1714 |
|
1715 |
class TestLUClusterVerifyGroupVerifyFiles(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyFiles."""

  @withLockedLU
  def test(self, lu):
    """Exercises all file-consistency error paths in one run.

    Builds five extra nodes with differing roles (master candidate,
    non-vm-capable, offline) and per-node file checksum results, then checks
    that every expected inconsistency message — and nothing else — is logged.
    """
    node1 = self.cfg.AddNewNode(master_candidate=False, offline=False,
                                vm_capable=True)
    node2 = self.cfg.AddNewNode(master_candidate=True, vm_capable=False)
    node3 = self.cfg.AddNewNode(master_candidate=False, offline=False,
                                vm_capable=True)
    node4 = self.cfg.AddNewNode(master_candidate=False, offline=False,
                                vm_capable=True)
    node5 = self.cfg.AddNewNode(master_candidate=False, offline=True)

    nodeinfo = [self.master, node1, node2, node3, node4, node5]
    # Files that must exist on every node
    files_all = set([
      pathutils.CLUSTER_DOMAIN_SECRET_FILE,
      pathutils.RAPI_CERT_FILE,
      pathutils.RAPI_USERS_FILE,
      ])
    # Files that may exist, but then on all or no nodes
    files_opt = set([
      pathutils.RAPI_USERS_FILE,
      hv_xen.XL_CONFIG_FILE,
      pathutils.VNC_PASSWORD_FILE,
      ])
    # Files restricted to master candidates
    files_mc = set([
      pathutils.CLUSTER_CONF_FILE,
      ])
    # Files restricted to vm-capable nodes
    files_vm = set([
      hv_xen.XEND_CONFIG_FILE,
      hv_xen.XL_CONFIG_FILE,
      pathutils.VNC_PASSWORD_FILE,
      ])
    nvinfo = RpcResultsBuilder() \
      .AddSuccessfulNode(self.master, {
        constants.NV_FILELIST: {
          pathutils.CLUSTER_CONF_FILE: "82314f897f38b35f9dab2f7c6b1593e0",
          pathutils.RAPI_CERT_FILE: "babbce8f387bc082228e544a2146fee4",
          pathutils.CLUSTER_DOMAIN_SECRET_FILE: "cds-47b5b3f19202936bb4",
          hv_xen.XEND_CONFIG_FILE: "b4a8a824ab3cac3d88839a9adeadf310",
          hv_xen.XL_CONFIG_FILE: "77935cee92afd26d162f9e525e3d49b9"
        }}) \
      .AddSuccessfulNode(node1, {
        constants.NV_FILELIST: {
          pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
          hv_xen.XEND_CONFIG_FILE: "b4a8a824ab3cac3d88839a9adeadf310",
          }
        }) \
      .AddSuccessfulNode(node2, {
        constants.NV_FILELIST: {
          pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
          pathutils.CLUSTER_DOMAIN_SECRET_FILE: "cds-47b5b3f19202936bb4",
          }
        }) \
      .AddSuccessfulNode(node3, {
        constants.NV_FILELIST: {
          pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
          pathutils.CLUSTER_CONF_FILE: "conf-a6d4b13e407867f7a7b4f0f232a8f527",
          pathutils.CLUSTER_DOMAIN_SECRET_FILE: "cds-47b5b3f19202936bb4",
          pathutils.RAPI_USERS_FILE: "rapiusers-ea3271e8d810ef3",
          hv_xen.XL_CONFIG_FILE: "77935cee92afd26d162f9e525e3d49b9"
          }
        }) \
      .AddSuccessfulNode(node4, {}) \
      .AddOfflineNode(node5) \
      .Build()
    assert set(nvinfo.keys()) == set(map(operator.attrgetter("uuid"), nodeinfo))

    lu._VerifyFiles(nodeinfo, self.master_uuid, nvinfo,
                    (files_all, files_opt, files_mc, files_vm))

    expected_msgs = [
      "File %s found with 2 different checksums (variant 1 on"
      " %s, %s, %s; variant 2 on %s)" %
      (pathutils.RAPI_CERT_FILE, node1.name, node2.name, node3.name,
       self.master.name),
      "File %s is missing from node(s) %s" %
      (pathutils.CLUSTER_DOMAIN_SECRET_FILE, node1.name),
      "File %s should not exist on node(s) %s" %
      (pathutils.CLUSTER_CONF_FILE, node3.name),
      "File %s is missing from node(s) %s" %
      (hv_xen.XEND_CONFIG_FILE, node3.name),
      "File %s is missing from node(s) %s" %
      (pathutils.CLUSTER_CONF_FILE, node2.name),
      "File %s found with 2 different checksums (variant 1 on"
      " %s; variant 2 on %s)" %
      (pathutils.CLUSTER_CONF_FILE, self.master.name, node3.name),
      "File %s is optional, but it must exist on all or no nodes (not"
      " found on %s, %s, %s)" %
      (pathutils.RAPI_USERS_FILE, self.master.name, node1.name, node2.name),
      "File %s is optional, but it must exist on all or no nodes (not"
      " found on %s)" % (hv_xen.XL_CONFIG_FILE, node1.name),
      "Node did not return file checksum data",
      ]

    # Exactly the expected messages, no extras.
    self.assertEqual(len(self.mcpu.GetLogMessages()), len(expected_msgs))
    for expected_msg in expected_msgs:
      self.mcpu.assertLogContainsInLine(expected_msg)
|
1811 |
|
1812 |
|
1813 |
class TestLUClusterVerifyGroupVerifyNodeDrbd(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyNodeDrbd."""

  def setUp(self):
    super(TestLUClusterVerifyGroupVerifyNodeDrbd, self).setUp()

    # One running DRBD instance spanning node1 (primary) and node2
    # (secondary); its disk occupies drbd minor 1 on both nodes.
    self.node1 = self.cfg.AddNewNode()
    self.node2 = self.cfg.AddNewNode()
    self.inst = self.cfg.AddNewInstance(
      disks=[self.cfg.CreateDisk(dev_type=constants.DT_DRBD8,
                                 primary_node=self.node1,
                                 secondary_node=self.node2)],
      admin_state=constants.ADMINST_UP)

  @withLockedLU
  def testNoDrbdHelper(self, lu):
    """With no configured helper, the helper check is skipped entirely."""
    lu._VerifyNodeDrbd(self.master, {}, self.cfg.GetAllInstancesInfo(), None,
                       self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testDrbdHelperInvalidNodeResult(self, lu):
    """Each malformed or mismatching helper result yields its own error."""
    for ndata, expected in [({}, "no drbd usermode helper returned"),
                            ({constants.NV_DRBDHELPER: (False, "")},
                             "drbd usermode helper check unsuccessful"),
                            ({constants.NV_DRBDHELPER: (True, "/bin/false")},
                             "wrong drbd usermode helper")]:
      self.mcpu.ClearLogMessages()
      lu._VerifyNodeDrbd(self.master, ndata, self.cfg.GetAllInstancesInfo(),
                         "/bin/true", self.cfg.ComputeDRBDMap())
      self.mcpu.assertLogContainsRegex(expected)

  @withLockedLU
  def testNoNodeResult(self, lu):
    """A missing minor list means the instance's minor is not active."""
    lu._VerifyNodeDrbd(self.node1, {}, self.cfg.GetAllInstancesInfo(),
                       None, self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogContainsRegex("drbd minor 1 of .* is not active")

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    """A non-list minor payload is reported as an unparsable status file."""
    lu._VerifyNodeDrbd(self.node1, {constants.NV_DRBDLIST: ""},
                       self.cfg.GetAllInstancesInfo(), None,
                       self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogContainsRegex("cannot parse drbd status file")

  @withLockedLU
  def testWrongMinorInUse(self, lu):
    """Reports both the inactive expected minor and the stray active one."""
    lu._VerifyNodeDrbd(self.node1, {constants.NV_DRBDLIST: [2]},
                       self.cfg.GetAllInstancesInfo(), None,
                       self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogContainsRegex("drbd minor 1 of .* is not active")
    self.mcpu.assertLogContainsRegex("unallocated drbd minor 2 is in use")

  @withLockedLU
  def testValidResult(self, lu):
    """The expected minor being active produces no log output."""
    lu._VerifyNodeDrbd(self.node1, {constants.NV_DRBDLIST: [1]},
                       self.cfg.GetAllInstancesInfo(), None,
                       self.cfg.ComputeDRBDMap())
    self.mcpu.assertLogIsEmpty()
|
1870 |
|
1871 |
|
1872 |
class TestLUClusterVerifyGroupVerifyNodeOs(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._UpdateNodeOS and _VerifyNodeOS."""

  @withLockedLU
  def testUpdateNodeOsInvalidNodeResult(self, lu):
    """Every malformed OS-list payload must be flagged as invalid."""
    for ndata in [{}, {constants.NV_OSLIST: ""}, {constants.NV_OSLIST: [""]},
                  {constants.NV_OSLIST: [["1", "2"]]}]:
      self.mcpu.ClearLogMessages()
      nimage = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
      lu._UpdateNodeOS(self.master, ndata, nimage)
      self.mcpu.assertLogContainsRegex("node hasn't returned valid OS data")

  @withLockedLU
  def testUpdateNodeOsValidNodeResult(self, lu):
    """A well-formed OS list is accepted without log output."""
    # Each entry: [name, path, status, diagnose, variants, params, api_vers]
    ndata = {
      constants.NV_OSLIST: [
        ["mock_OS", "/mocked/path", True, "", ["default"], [],
         [constants.OS_API_V20]],
        ["Another_Mock", "/random", True, "", ["var1", "var2"],
         [{"param1": "val1"}, {"param2": "val2"}], constants.OS_API_VERSIONS]
      ]
    }
    nimage = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    lu._UpdateNodeOS(self.master, ndata, nimage)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testVerifyNodeOs(self, lu):
    """Compares a node's OS list against the reference node's.

    The fixture covers: an OS only on the tested node, one only on the
    reference, one differing in API/variants/params, a broken OS and an OS
    with multiple entries — each with its expected error message.
    """
    node = self.cfg.AddNewNode()
    nimg_root = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=node.uuid)

    nimg_root.os_fail = False
    nimg_root.oslist = {
      "mock_os": [("/mocked/path", True, "", set(["default"]), set(),
                   set([constants.OS_API_V20]))],
      "broken_base_os": [("/broken", False, "", set(), set(),
                          set([constants.OS_API_V20]))],
      "only_on_root": [("/random", True, "", set(), set(), set())],
      "diffing_os": [("/pinky", True, "", set(["var1", "var2"]),
                      set([("param1", "val1"), ("param2", "val2")]),
                      set([constants.OS_API_V20]))]
    }
    nimg.os_fail = False
    nimg.oslist = {
      "mock_os": [("/mocked/path", True, "", set(["default"]), set(),
                   set([constants.OS_API_V20]))],
      "only_on_test": [("/random", True, "", set(), set(), set())],
      "diffing_os": [("/bunny", True, "", set(["var1", "var3"]),
                      set([("param1", "val1"), ("param3", "val3")]),
                      set([constants.OS_API_V15]))],
      "broken_os": [("/broken", False, "", set(), set(),
                     set([constants.OS_API_V20]))],
      "multi_entries": [
        ("/multi1", True, "", set(), set(), set([constants.OS_API_V20])),
        ("/multi2", True, "", set(), set(), set([constants.OS_API_V20]))]
    }

    lu._VerifyNodeOS(node, nimg, nimg_root)

    expected_msgs = [
      "Extra OS only_on_test not present on reference node",
      "OSes present on reference node .* but missing on this node:" +
      " only_on_root",
      "OS API version for diffing_os differs",
      "OS variants list for diffing_os differs",
      "OS parameters for diffing_os differs",
      "Invalid OS broken_os",
      "Extra OS broken_os not present on reference node",
      "OS 'multi_entries' has multiple entries",
      "Extra OS multi_entries not present on reference node"
    ]

    # Exactly the expected messages, no extras.
    self.assertEqual(len(expected_msgs), len(self.mcpu.GetLogMessages()))
    for expected_msg in expected_msgs:
      self.mcpu.assertLogContainsRegex(expected_msg)
|
1946 |
|
1947 |
|
1948 |
class TestLUClusterVerifyGroupVerifyAcceptedFileStoragePaths(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyAcceptedFileStoragePaths."""

  @withLockedLU
  def testNotMaster(self, lu):
    """Non-master nodes are not expected to run the path check."""
    lu._VerifyAcceptedFileStoragePaths(self.master, {}, False)
    self.mcpu.assertLogIsEmpty()

  # NOTE(review): "Retuned" is a typo for "Returned" in the method name;
  # kept as-is since unittest discovery only needs the "test" prefix.
  @withLockedLU
  def testNotMasterButRetunedValue(self, lu):
    """A non-master node returning path data is itself an error."""
    lu._VerifyAcceptedFileStoragePaths(
      self.master, {constants.NV_ACCEPTED_STORAGE_PATHS: []}, False)
    self.mcpu.assertLogContainsRegex(
      "Node should not have returned forbidden file storage paths")

  @withLockedLU
  def testMasterInvalidNodeResult(self, lu):
    """The master must return path data; a missing result is an error."""
    lu._VerifyAcceptedFileStoragePaths(self.master, {}, True)
    self.mcpu.assertLogContainsRegex(
      "Node did not return forbidden file storage paths")

  @withLockedLU
  def testMasterForbiddenPaths(self, lu):
    """Non-empty forbidden path list from the master must be reported."""
    lu._VerifyAcceptedFileStoragePaths(
      self.master, {constants.NV_ACCEPTED_STORAGE_PATHS: ["/forbidden"]}, True)
    self.mcpu.assertLogContainsRegex("Found forbidden file storage paths")

  @withLockedLU
  def testMasterSuccess(self, lu):
    """An empty forbidden path list from the master is clean."""
    lu._VerifyAcceptedFileStoragePaths(
      self.master, {constants.NV_ACCEPTED_STORAGE_PATHS: []}, True)
    self.mcpu.assertLogIsEmpty()
|
1979 |
|
1980 |
|
1981 |
class TestLUClusterVerifyGroupVerifyStoragePaths(
    TestLUClusterVerifyGroupMethods):
  """Tests for the file/sharedfile storage path verification helpers."""

  @withLockedLU
  def testVerifyFileStoragePathsSuccess(self, lu):
    """No reported unusable path means no log output."""
    lu._VerifyFileStoragePaths(self.master, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testVerifyFileStoragePathsFailure(self, lu):
    """A node-reported unusable file storage path must be flagged."""
    lu._VerifyFileStoragePaths(self.master,
                               {constants.NV_FILE_STORAGE_PATH: "/fail/path"})
    self.mcpu.assertLogContainsRegex(
      "The configured file storage path is unusable")

  @withLockedLU
  def testVerifySharedFileStoragePathsSuccess(self, lu):
    """No reported unusable shared path means no log output."""
    lu._VerifySharedFileStoragePaths(self.master, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testVerifySharedFileStoragePathsFailure(self, lu):
    """A node-reported unusable sharedfile storage path must be flagged."""
    lu._VerifySharedFileStoragePaths(
      self.master, {constants.NV_SHARED_FILE_STORAGE_PATH: "/fail/path"})
    self.mcpu.assertLogContainsRegex(
      "The configured sharedfile storage path is unusable")
|
2006 |
|
2007 |
|
2008 |
class TestLUClusterVerifyGroupVerifyOob(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._VerifyOob."""

  @withLockedLU
  def testEmptyResult(self, lu):
    """An empty node result must produce no log output."""
    lu._VerifyOob(self.master, {})
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testErrorResults(self, lu):
    """Every OOB path error returned by the node must appear in the log."""
    failing_paths = ["path1", "path2"]
    lu._VerifyOob(self.master, {constants.NV_OOB_PATHS: failing_paths})
    for path in failing_paths:
      self.mcpu.assertLogContainsRegex(path)
2019 |
|
2020 |
|
2021 |
class TestLUClusterVerifyGroupUpdateNodeVolumes(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._UpdateNodeVolumes."""

  def setUp(self):
    super(TestLUClusterVerifyGroupUpdateNodeVolumes, self).setUp()
    self.nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)

  @withLockedLU
  def testNoVgName(self, lu):
    """Without a VG name nothing is verified, but lvm_fail is set."""
    lu._UpdateNodeVolumes(self.master, {}, self.nimg, None)
    self.mcpu.assertLogIsEmpty()
    self.assertTrue(self.nimg.lvm_fail)

  @withLockedLU
  def testErrorMessage(self, lu):
    """A string LV-list payload is treated as an LVM error message."""
    lu._UpdateNodeVolumes(self.master, {constants.NV_LVLIST: "mock error"},
                          self.nimg, "mock_vg")
    self.mcpu.assertLogContainsRegex("LVM problem on node: mock error")
    self.assertTrue(self.nimg.lvm_fail)

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    """A non-dict LV-list payload is reported as a failed RPC."""
    lu._UpdateNodeVolumes(self.master, {constants.NV_LVLIST: [1, 2, 3]},
                          self.nimg, "mock_vg")
    self.mcpu.assertLogContainsRegex("rpc call to node failed")
    self.assertTrue(self.nimg.lvm_fail)

  @withLockedLU
  def testValidNodeResult(self, lu):
    """A dict LV-list payload is accepted and clears the failure flag."""
    lu._UpdateNodeVolumes(self.master, {constants.NV_LVLIST: {}},
                          self.nimg, "mock_vg")
    self.mcpu.assertLogIsEmpty()
    self.assertFalse(self.nimg.lvm_fail)
2053 |
|
2054 |
|
2055 |
class TestLUClusterVerifyGroupUpdateNodeInstances(
    TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._UpdateNodeInstances."""

  def setUp(self):
    super(TestLUClusterVerifyGroupUpdateNodeInstances, self).setUp()
    self.nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    """A result without an instance list is reported as a failed RPC."""
    lu._UpdateNodeInstances(self.master, {}, self.nimg)
    self.mcpu.assertLogContainsRegex("rpc call to node failed")

  @withLockedLU
  def testValidNodeResult(self, lu):
    """A valid instance list is accepted without log output."""
    inst = self.cfg.AddNewInstance()
    lu._UpdateNodeInstances(self.master,
                            {constants.NV_INSTANCELIST: [inst.name]},
                            self.nimg)
    self.mcpu.assertLogIsEmpty()
|
2073 |
|
2074 |
|
2075 |
class TestLUClusterVerifyGroupUpdateNodeInfo(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._UpdateNodeInfo."""

  def setUp(self):
    super(TestLUClusterVerifyGroupUpdateNodeInfo, self).setUp()
    self.nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
    # Minimal well-formed hypervisor info payload
    self.valid_hvresult = {constants.NV_HVINFO: {"memory_free": 1024}}

  @withLockedLU
  def testInvalidHvNodeResult(self, lu):
    """Missing or non-dict hypervisor info is reported as a failed RPC."""
    for ndata in [{}, {constants.NV_HVINFO: ""}]:
      self.mcpu.ClearLogMessages()
      lu._UpdateNodeInfo(self.master, ndata, self.nimg, None)
      self.mcpu.assertLogContainsRegex("rpc call to node failed")

  @withLockedLU
  def testInvalidMemoryFreeHvNodeResult(self, lu):
    """A non-integer memory_free value is reported as invalid nodeinfo."""
    lu._UpdateNodeInfo(self.master,
                       {constants.NV_HVINFO: {"memory_free": "abc"}},
                       self.nimg, None)
    self.mcpu.assertLogContainsRegex(
      "node returned invalid nodeinfo, check hypervisor")

  @withLockedLU
  def testValidHvNodeResult(self, lu):
    """Valid hypervisor info with no VG requested verifies cleanly."""
    lu._UpdateNodeInfo(self.master, self.valid_hvresult, self.nimg, None)
    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testInvalidVgNodeResult(self, lu):
    """A VG list without the requested VG must be reported."""
    for vgdata in [[], ""]:
      self.mcpu.ClearLogMessages()
      ndata = {constants.NV_VGLIST: vgdata}
      ndata.update(self.valid_hvresult)
      lu._UpdateNodeInfo(self.master, ndata, self.nimg, "mock_vg")
      self.mcpu.assertLogContainsRegex(
        "node didn't return data for the volume group 'mock_vg'")

  @withLockedLU
  def testInvalidDiskFreeVgNodeResult(self, lu):
    """A non-integer free-space value for the VG is invalid LVM info."""
    self.valid_hvresult.update({
      constants.NV_VGLIST: {"mock_vg": "abc"}
    })
    lu._UpdateNodeInfo(self.master, self.valid_hvresult, self.nimg, "mock_vg")
    self.mcpu.assertLogContainsRegex(
      "node returned invalid LVM info, check LVM status")

  @withLockedLU
  def testValidVgNodeResult(self, lu):
    """Valid hypervisor and VG data verify cleanly."""
    self.valid_hvresult.update({
      constants.NV_VGLIST: {"mock_vg": 10000}
    })
    lu._UpdateNodeInfo(self.master, self.valid_hvresult, self.nimg, "mock_vg")
    self.mcpu.assertLogIsEmpty()
|
2127 |
|
2128 |
|
2129 |
class TestLUClusterVerifyGroupCollectDiskInfo(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup._CollectDiskInfo."""

  def setUp(self):
    super(TestLUClusterVerifyGroupCollectDiskInfo, self).setUp()

    # Three nodes: node1 has only a diskless instance (no disks to query),
    # node2 is primary for a plain instance and secondary for the DRBD one,
    # node3 is primary for the DRBD instance.
    self.node1 = self.cfg.AddNewNode()
    self.node2 = self.cfg.AddNewNode()
    self.node3 = self.cfg.AddNewNode()

    self.diskless_inst = \
      self.cfg.AddNewInstance(primary_node=self.node1,
                              disk_template=constants.DT_DISKLESS)
    self.plain_inst = \
      self.cfg.AddNewInstance(primary_node=self.node2,
                              disk_template=constants.DT_PLAIN)
    self.drbd_inst = \
      self.cfg.AddNewInstance(primary_node=self.node3,
                              secondary_node=self.node2,
                              disk_template=constants.DT_DRBD8)

    self.node1_img = cluster.LUClusterVerifyGroup.NodeImage(
      uuid=self.node1.uuid)
    self.node1_img.pinst = [self.diskless_inst.uuid]
    self.node1_img.sinst = []
    self.node2_img = cluster.LUClusterVerifyGroup.NodeImage(
      uuid=self.node2.uuid)
    self.node2_img.pinst = [self.plain_inst.uuid]
    self.node2_img.sinst = [self.drbd_inst.uuid]
    self.node3_img = cluster.LUClusterVerifyGroup.NodeImage(
      uuid=self.node3.uuid)
    self.node3_img.pinst = [self.drbd_inst.uuid]
    self.node3_img.sinst = []

    self.node_images = {
      self.node1.uuid: self.node1_img,
      self.node2.uuid: self.node2_img,
      self.node3.uuid: self.node3_img
    }

    self.node_uuids = [self.node1.uuid, self.node2.uuid, self.node3.uuid]

  @withLockedLU
  def testSuccessfulRun(self, lu):
    """Well-formed mirror status results for all disks verify cleanly."""
    self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.node2, [(True, ""), (True, "")]) \
        .AddSuccessfulNode(self.node3, [(True, "")]) \
        .Build()

    lu._CollectDiskInfo(self.node_uuids, self.node_images,
                        self.cfg.GetAllInstancesInfo())

    self.mcpu.assertLogIsEmpty()

  @withLockedLU
  def testOfflineAndFailingNodes(self, lu):
    """Offline and RPC-failing nodes are reported during disk collection."""
    self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
      RpcResultsBuilder() \
        .AddOfflineNode(self.node2) \
        .AddFailedNode(self.node3) \
        .Build()

    lu._CollectDiskInfo(self.node_uuids, self.node_images,
                        self.cfg.GetAllInstancesInfo())

    self.mcpu.assertLogContainsRegex("while getting disk information")

  @withLockedLU
  def testInvalidNodeResult(self, lu):
    """Malformed per-disk payloads do not produce mcpu-visible errors."""
    self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
      RpcResultsBuilder() \
        .AddSuccessfulNode(self.node2, [(True,), (False,)]) \
        .AddSuccessfulNode(self.node3, [""]) \
        .Build()

    lu._CollectDiskInfo(self.node_uuids, self.node_images,
                        self.cfg.GetAllInstancesInfo())
    # logging is not performed through mcpu
    self.mcpu.assertLogIsEmpty()
|
2207 |
|
2208 |
|
2209 |
class TestLUClusterVerifyGroupHooksCallBack(TestLUClusterVerifyGroupMethods):
  """Tests for LUClusterVerifyGroup.HooksCallBack."""

  def setUp(self):
    super(TestLUClusterVerifyGroupHooksCallBack, self).setUp()

    # HooksCallBack requires a feedback function; output is discarded.
    self.feedback_fn = lambda _: None

  def PrepareLU(self, lu):
    super(TestLUClusterVerifyGroupHooksCallBack, self).PrepareLU(lu)

    lu.my_node_uuids = list(self.cfg.GetAllNodesInfo().keys())

  @withLockedLU
  def testEmptyGroup(self, lu):
    """An empty node group must be handled without error."""
    lu.my_node_uuids = []
    lu.HooksCallBack(constants.HOOKS_PHASE_POST, None, self.feedback_fn, None)

  @withLockedLU
  def testFailedResult(self, lu):
    """An RPC failure while running hooks must be reported."""
    lu.HooksCallBack(constants.HOOKS_PHASE_POST,
                     RpcResultsBuilder(use_node_names=True)
                       .AddFailedNode(self.master).Build(),
                     self.feedback_fn,
                     None)
    self.mcpu.assertLogContainsRegex("Communication failure in hooks execution")

  @withLockedLU
  def testOfflineNode(self, lu):
    """Offline nodes are silently skipped in hooks results."""
    lu.HooksCallBack(constants.HOOKS_PHASE_POST,
                     RpcResultsBuilder(use_node_names=True)
                       .AddOfflineNode(self.master).Build(),
                     self.feedback_fn,
                     None)

  @withLockedLU
  def testValidResult(self, lu):
    """A successful hook script result produces no errors."""
    lu.HooksCallBack(constants.HOOKS_PHASE_POST,
                     RpcResultsBuilder(use_node_names=True)
                       .AddSuccessfulNode(self.master,
                                          [("mock_script",
                                            constants.HKR_SUCCESS,
                                            "mock output")])
                       .Build(),
                     self.feedback_fn,
                     None)

  @withLockedLU
  def testFailedScriptResult(self, lu):
    """A failed hook script is reported by name."""
    lu.HooksCallBack(constants.HOOKS_PHASE_POST,
                     RpcResultsBuilder(use_node_names=True)
                       .AddSuccessfulNode(self.master,
                                          [("mock_script",
                                            constants.HKR_FAIL,
                                            "mock output")])
                       .Build(),
                     self.feedback_fn,
                     None)
    self.mcpu.assertLogContainsRegex("Script mock_script failed")
2266 |
|
2267 |
|
2268 |
class TestLUClusterVerifyDisks(CmdlibTestCase):
  """Tests for OpClusterVerifyDisks."""

  def testVerifyDisks(self):
    """Executing the opcode must submit exactly one verification job."""
    result = self.ExecOpCode(opcodes.OpClusterVerifyDisks())
    self.assertEqual(1, len(result["jobs"]))
|
2275 |
|
2276 |
if __name__ == "__main__":
  # Run all test cases in this module through Ganeti's test runner.
  testutils.GanetiTestProgram()