root / test / py / cmdlib / cluster_unittest.py @ 850be460
History | View | Annotate | Download (37.3 kB)
1 |
#!/usr/bin/python
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2008, 2011, 2012, 2013 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
|
22 |
"""Tests for LUCluster*
|
23 |
|
24 |
"""
|
25 |
|
26 |
import OpenSSL

import copy
import operator
import os
import shutil
import tempfile
import unittest

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import query
from ganeti import rpc
from ganeti import utils
from ganeti.cmdlib import cluster
from ganeti.hypervisor import hv_xen

from testsupport import *

import testutils
import mocks
52 |
|
53 |
|
54 |
class TestCertVerification(testutils.GanetiTestCase):
  """Tests for L{cluster._VerifyCertificate}."""

  def setUp(self):
    testutils.GanetiTestCase.setUp(self)
    self.tmpdir = tempfile.mkdtemp()

  def tearDown(self):
    shutil.rmtree(self.tmpdir)

  def testVerifyCertificate(self):
    # A valid certificate must pass verification without complaints
    cluster._VerifyCertificate(testutils.TestDataFilename("cert1.pem"))

    # A non-existing file must be reported as an error
    missing = os.path.join(self.tmpdir, "does-not-exist")
    (errcode, _) = cluster._VerifyCertificate(missing)
    self.assertEqual(errcode, cluster.LUClusterVerifyConfig.ETYPE_ERROR)

    # A file that is not a certificate at all must be an error, too
    not_a_cert = testutils.TestDataFilename("bdev-net.txt")
    (errcode, _) = cluster._VerifyCertificate(not_a_cert)
    self.assertEqual(errcode, cluster.LUClusterVerifyConfig.ETYPE_ERROR)
|
75 |
|
76 |
|
77 |
class TestClusterVerifySsh(unittest.TestCase):
  """Tests for L{cluster.LUClusterVerifyGroup._SelectSshCheckNodes}."""

  def testMultipleGroups(self):
    # With several node groups, every online node of the checked group must
    # be paired with nodes from the other groups (offline nodes excluded).
    fn = cluster.LUClusterVerifyGroup._SelectSshCheckNodes
    mygroupnodes = [
      objects.Node(name="node20", group="my", offline=False),
      objects.Node(name="node21", group="my", offline=False),
      objects.Node(name="node22", group="my", offline=False),
      objects.Node(name="node23", group="my", offline=False),
      objects.Node(name="node24", group="my", offline=False),
      objects.Node(name="node25", group="my", offline=False),
      # Offline node in the checked group; must not appear in the result
      objects.Node(name="node26", group="my", offline=True),
      ]
    nodes = [
      objects.Node(name="node1", group="g1", offline=True),
      objects.Node(name="node2", group="g1", offline=False),
      objects.Node(name="node3", group="g1", offline=False),
      objects.Node(name="node4", group="g1", offline=True),
      objects.Node(name="node5", group="g1", offline=False),
      objects.Node(name="node10", group="xyz", offline=False),
      objects.Node(name="node11", group="xyz", offline=False),
      # Group whose nodes are all offline; it cannot contribute check peers
      objects.Node(name="node40", group="alloff", offline=True),
      objects.Node(name="node41", group="alloff", offline=True),
      objects.Node(name="node50", group="aaa", offline=False),
      ] + mygroupnodes
    assert not utils.FindDuplicates(map(operator.attrgetter("name"), nodes))

    (online, perhost) = fn(mygroupnodes, "my", nodes)
    # Only node20..node25 are online members of group "my"
    self.assertEqual(online, ["node%s" % i for i in range(20, 26)])
    self.assertEqual(set(perhost.keys()), set(online))

    # Each online node checks one node per foreign group, round-robin over
    # that group's online members
    self.assertEqual(perhost, {
      "node20": ["node10", "node2", "node50"],
      "node21": ["node11", "node3", "node50"],
      "node22": ["node10", "node5", "node50"],
      "node23": ["node11", "node2", "node50"],
      "node24": ["node10", "node3", "node50"],
      "node25": ["node11", "node5", "node50"],
      })

  def testSingleGroup(self):
    # With a single group there are no foreign groups to check against, so
    # the per-host peer lists must be empty
    fn = cluster.LUClusterVerifyGroup._SelectSshCheckNodes
    nodes = [
      objects.Node(name="node1", group="default", offline=True),
      objects.Node(name="node2", group="default", offline=False),
      objects.Node(name="node3", group="default", offline=False),
      objects.Node(name="node4", group="default", offline=True),
      ]
    assert not utils.FindDuplicates(map(operator.attrgetter("name"), nodes))

    (online, perhost) = fn(nodes, "default", nodes)
    self.assertEqual(online, ["node2", "node3"])
    self.assertEqual(set(perhost.keys()), set(online))

    self.assertEqual(perhost, {
      "node2": [],
      "node3": [],
      })
134 |
|
135 |
|
136 |
class TestClusterVerifyFiles(unittest.TestCase):
  """Tests for L{cluster.LUClusterVerifyGroup._VerifyFiles}."""

  @staticmethod
  def _FakeErrorIf(errors, cond, ecode, item, msg, *args, **kwargs):
    # Replacement for the LU's _ErrorIf: instead of logging, collect
    # (item, formatted message) pairs in the given "errors" list when the
    # condition holds
    assert ((ecode == constants.CV_ENODEFILECHECK and
             ht.TNonEmptyString(item)) or
            (ecode == constants.CV_ECLUSTERFILECHECK and
             item is None))

    if args:
      msg = msg % args

    if cond:
      errors.append((item, msg))

  def test(self):
    errors = []
    # Node set deliberately covering all interesting cases: master,
    # regular vm-capable nodes, a non-vm-capable master candidate, a node
    # returning no checksum data and an offline node
    nodeinfo = [
      objects.Node(name="master.example.com",
                   uuid="master-uuid",
                   offline=False,
                   vm_capable=True),
      objects.Node(name="node2.example.com",
                   uuid="node2-uuid",
                   offline=False,
                   vm_capable=True),
      objects.Node(name="node3.example.com",
                   uuid="node3-uuid",
                   master_candidate=True,
                   vm_capable=False),
      objects.Node(name="node4.example.com",
                   uuid="node4-uuid",
                   offline=False,
                   vm_capable=True),
      objects.Node(name="nodata.example.com",
                   uuid="nodata-uuid",
                   offline=False,
                   vm_capable=True),
      objects.Node(name="offline.example.com",
                   uuid="offline-uuid",
                   offline=True),
      ]
    # Files that must exist on all nodes
    files_all = set([
      pathutils.CLUSTER_DOMAIN_SECRET_FILE,
      pathutils.RAPI_CERT_FILE,
      pathutils.RAPI_USERS_FILE,
      ])
    # Files that may be missing, but then must be missing everywhere
    files_opt = set([
      pathutils.RAPI_USERS_FILE,
      hv_xen.XL_CONFIG_FILE,
      pathutils.VNC_PASSWORD_FILE,
      ])
    # Files restricted to master candidates
    files_mc = set([
      pathutils.CLUSTER_CONF_FILE,
      ])
    # Files restricted to vm-capable nodes
    files_vm = set([
      hv_xen.XEND_CONFIG_FILE,
      hv_xen.XL_CONFIG_FILE,
      pathutils.VNC_PASSWORD_FILE,
      ])
    # Simulated RPC verify results; the checksum strings are arbitrary,
    # only (in)equality between nodes matters
    nvinfo = {
      "master-uuid": rpc.RpcResult(data=(True, {
        constants.NV_FILELIST: {
          pathutils.CLUSTER_CONF_FILE: "82314f897f38b35f9dab2f7c6b1593e0",
          pathutils.RAPI_CERT_FILE: "babbce8f387bc082228e544a2146fee4",
          pathutils.CLUSTER_DOMAIN_SECRET_FILE: "cds-47b5b3f19202936bb4",
          hv_xen.XEND_CONFIG_FILE: "b4a8a824ab3cac3d88839a9adeadf310",
          hv_xen.XL_CONFIG_FILE: "77935cee92afd26d162f9e525e3d49b9"
        }})),
      "node2-uuid": rpc.RpcResult(data=(True, {
        constants.NV_FILELIST: {
          pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
          hv_xen.XEND_CONFIG_FILE: "b4a8a824ab3cac3d88839a9adeadf310",
          }
        })),
      "node3-uuid": rpc.RpcResult(data=(True, {
        constants.NV_FILELIST: {
          pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
          pathutils.CLUSTER_DOMAIN_SECRET_FILE: "cds-47b5b3f19202936bb4",
          }
        })),
      "node4-uuid": rpc.RpcResult(data=(True, {
        constants.NV_FILELIST: {
          pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
          pathutils.CLUSTER_CONF_FILE: "conf-a6d4b13e407867f7a7b4f0f232a8f527",
          pathutils.CLUSTER_DOMAIN_SECRET_FILE: "cds-47b5b3f19202936bb4",
          pathutils.RAPI_USERS_FILE: "rapiusers-ea3271e8d810ef3",
          hv_xen.XL_CONFIG_FILE: "77935cee92afd26d162f9e525e3d49b9"
          }
        })),
      # Node returning a result without any checksum payload
      "nodata-uuid": rpc.RpcResult(data=(True, {})),
      "offline-uuid": rpc.RpcResult(offline=True),
      }
    assert set(nvinfo.keys()) == set(map(operator.attrgetter("uuid"), nodeinfo))

    verify_lu = cluster.LUClusterVerifyGroup(mocks.FakeProc(),
                                             opcodes.OpClusterVerify(),
                                             mocks.FakeContext(),
                                             None)

    # Intercept error reporting so the expected messages can be asserted on
    verify_lu._ErrorIf = compat.partial(self._FakeErrorIf, errors)

    # TODO: That's a bit hackish to mock only this single method. We should
    # build a better FakeConfig which provides such a feature already.
    def GetNodeName(node_uuid):
      # Linear scan is fine for this small fixture
      for node in nodeinfo:
        if node.uuid == node_uuid:
          return node.name
      return None

    verify_lu.cfg.GetNodeName = GetNodeName

    verify_lu._VerifyFiles(nodeinfo, "master-uuid", nvinfo,
                           (files_all, files_opt, files_mc, files_vm))
    # Compare as sorted lists since the order of reported errors is not
    # part of the contract
    self.assertEqual(sorted(errors), sorted([
      (None, ("File %s found with 2 different checksums (variant 1 on"
              " node2.example.com, node3.example.com, node4.example.com;"
              " variant 2 on master.example.com)" % pathutils.RAPI_CERT_FILE)),
      (None, ("File %s is missing from node(s) node2.example.com" %
              pathutils.CLUSTER_DOMAIN_SECRET_FILE)),
      (None, ("File %s should not exist on node(s) node4.example.com" %
              pathutils.CLUSTER_CONF_FILE)),
      (None, ("File %s is missing from node(s) node4.example.com" %
              hv_xen.XEND_CONFIG_FILE)),
      (None, ("File %s is missing from node(s) node3.example.com" %
              pathutils.CLUSTER_CONF_FILE)),
      (None, ("File %s found with 2 different checksums (variant 1 on"
              " master.example.com; variant 2 on node4.example.com)" %
              pathutils.CLUSTER_CONF_FILE)),
      (None, ("File %s is optional, but it must exist on all or no nodes (not"
              " found on master.example.com, node2.example.com,"
              " node3.example.com)" % pathutils.RAPI_USERS_FILE)),
      (None, ("File %s is optional, but it must exist on all or no nodes (not"
              " found on node2.example.com)" % hv_xen.XL_CONFIG_FILE)),
      ("nodata.example.com", "Node did not return file checksum data"),
      ]))
271 |
|
272 |
|
273 |
class TestLUClusterActivateMasterIp(CmdlibTestCase):
  """Tests for LUClusterActivateMasterIp."""

  def testSuccess(self):
    op = opcodes.OpClusterActivateMasterIp()

    self.rpc.call_node_activate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master)

    self.ExecOpCode(op)

    # The IP must be activated on the master node only
    self.rpc.call_node_activate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)

  def testFailure(self):
    op = opcodes.OpClusterActivateMasterIp()

    # Fixed: the original ended this statement with a stray trailing
    # backslash continuing into a blank line
    self.rpc.call_node_activate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)

    # An RPC failure must be turned into an OpExecError
    self.ExecOpCodeExpectOpExecError(op)
|
294 |
|
295 |
|
296 |
class TestLUClusterDeactivateMasterIp(CmdlibTestCase):
  """Tests for LUClusterDeactivateMasterIp."""

  def testSuccess(self):
    op = opcodes.OpClusterDeactivateMasterIp()

    self.rpc.call_node_deactivate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master)

    self.ExecOpCode(op)

    # The IP must be deactivated on the master node only
    self.rpc.call_node_deactivate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)

  def testFailure(self):
    op = opcodes.OpClusterDeactivateMasterIp()

    # Fixed: the original ended this statement with a stray trailing
    # backslash continuing into a blank line
    self.rpc.call_node_deactivate_master_ip.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)

    # An RPC failure must be turned into an OpExecError
    self.ExecOpCodeExpectOpExecError(op)
|
317 |
|
318 |
|
319 |
class TestLUClusterConfigQuery(CmdlibTestCase):
  """Tests for LUClusterConfigQuery."""

  def testInvalidField(self):
    op = opcodes.OpClusterConfigQuery(output_fields=["pinky_bunny"])

    # Unknown fields are rejected in the prerequisite check
    self.ExecOpCodeExpectOpPrereqError(op, "pinky_bunny")

  def testAllFields(self):
    op = opcodes.OpClusterConfigQuery(output_fields=query.CLUSTER_FIELDS.keys())

    self.rpc.call_get_watcher_pause.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, -1)

    ret = self.ExecOpCode(op)

    # The watcher pause is the only field requiring an RPC; it must be
    # fetched exactly once, and every field must yield a value
    self.assertEqual(1, self.rpc.call_get_watcher_pause.call_count)
    self.assertEqual(len(ret), len(query.CLUSTER_FIELDS))

  # Renamed from the misspelled "testEmpytFields"
  def testEmptyFields(self):
    op = opcodes.OpClusterConfigQuery(output_fields=[])

    self.ExecOpCode(op)

    # With no fields requested, no RPC must be made
    self.assertFalse(self.rpc.call_get_watcher_pause.called)
343 |
|
344 |
|
345 |
class TestLUClusterDestroy(CmdlibTestCase):
  """Tests for LUClusterDestroy."""

  def testExistingNodes(self):
    op = opcodes.OpClusterDestroy()

    self.cfg.AddNewNode()
    self.cfg.AddNewNode()

    # Raw strings: "\(" is an invalid escape sequence in a plain string
    # literal (DeprecationWarning on recent Python versions)
    self.ExecOpCodeExpectOpPrereqError(op, r"still 2 node\(s\)")

  def testExistingInstances(self):
    op = opcodes.OpClusterDestroy()

    self.cfg.AddNewInstance()
    self.cfg.AddNewInstance()

    self.ExecOpCodeExpectOpPrereqError(op, r"still 2 instance\(s\)")

  def testEmptyCluster(self):
    op = opcodes.OpClusterDestroy()

    self.ExecOpCode(op)

    # Destroying an empty cluster must run the post-destroy hook on the
    # (former) master node
    self.assertSingleHooksCall([self.master.name],
                               "cluster-destroy",
                               constants.HOOKS_PHASE_POST)
370 |
|
371 |
|
372 |
class TestLUClusterPostInit(CmdlibTestCase):
  """Tests for LUClusterPostInit."""

  # Renamed from the misspelled "testExecuion"
  def testExecution(self):
    op = opcodes.OpClusterPostInit()

    self.ExecOpCode(op)

    # Post-init must trigger the cluster-init post hook on the master
    self.assertSingleHooksCall([self.master.name],
                               "cluster-init",
                               constants.HOOKS_PHASE_POST)
381 |
|
382 |
|
383 |
class TestLUClusterQuery(CmdlibTestCase):
  """Tests for LUClusterQuery."""

  def testSimpleInvocation(self):
    # Querying a default cluster must simply succeed
    self.ExecOpCode(opcodes.OpClusterQuery())

  def testIPv6Cluster(self):
    # The query must also work on an IPv6-addressed cluster
    self.cluster.primary_ip_family = netutils.IP6Address.family

    self.ExecOpCode(opcodes.OpClusterQuery())
|
395 |
|
396 |
|
397 |
class TestLUClusterRedistConf(CmdlibTestCase):
  """Tests for LUClusterRedistConf."""

  def testSimpleInvocation(self):
    # Redistributing the configuration must succeed without further setup
    self.ExecOpCode(opcodes.OpClusterRedistConf())
|
402 |
|
403 |
|
404 |
class TestLUClusterRename(CmdlibTestCase):
  """Tests for LUClusterRename."""

  NEW_NAME = "new-name.example.com"
  NEW_IP = "1.2.3.4"

  def _PrepareRenameOp(self):
    # Make hostname resolution return the new name/IP and build the opcode
    self.netutils_mod.GetHostname.return_value = \
      HostnameMock(self.NEW_NAME, self.NEW_IP)
    return opcodes.OpClusterRename(name=self.NEW_NAME)

  def testNoChanges(self):
    # Renaming to the current name is refused
    op = opcodes.OpClusterRename(name=self.cfg.GetClusterName())

    self.ExecOpCodeExpectOpPrereqError(op, "name nor the IP address")

  def testReachableIp(self):
    op = self._PrepareRenameOp()
    # The new IP already answers on the network, so the rename must fail
    self.netutils_mod.TcpPing.return_value = True

    self.ExecOpCodeExpectOpPrereqError(op, "is reachable on the network")

  def testValidRename(self):
    op = self._PrepareRenameOp()

    self.ExecOpCode(op)

    # A successful rename rewrites the known-hosts file and bounces the
    # master IP exactly once each way
    self.assertEqual(1, self.ssh_mod.WriteKnownHostsFile.call_count)
    self.rpc.call_node_deactivate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)
    self.rpc.call_node_activate_master_ip.assert_called_once_with(
      self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)

  def testRenameOfflineMaster(self):
    # Renaming must also work when the master node is marked offline
    self.master.offline = True
    op = self._PrepareRenameOp()

    self.ExecOpCode(op)
|
444 |
|
445 |
|
446 |
class TestLUClusterRepairDiskSizes(CmdlibTestCase):
  """Tests for LUClusterRepairDiskSizes."""

  def testNoInstances(self):
    # Without instances there is nothing to repair, but the LU must succeed
    op = opcodes.OpClusterRepairDiskSizes()

    self.ExecOpCode(op)

  def _SetUpInstanceSingleDisk(self, dev_type=constants.LD_LV):
    # Create one instance with a single disk of the given device type,
    # placed on the master with a freshly added secondary node; returns
    # the (instance, disk) pair for further tweaking by the tests
    pnode = self.master
    snode = self.cfg.AddNewNode()

    disk = self.cfg.CreateDisk(dev_type=dev_type,
                               primary_node=pnode,
                               secondary_node=snode)
    inst = self.cfg.AddNewInstance(disks=[disk])

    return (inst, disk)

  def testSingleInstanceOnFailingNode(self):
    (inst, _) = self._SetUpInstanceSingleDisk()
    op = opcodes.OpClusterRepairDiskSizes(instances=[inst.name])

    self.rpc.call_blockdev_getdimensions.return_value = \
      self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)

    self.ExecOpCode(op)

    # The RPC failure is logged but does not abort the operation
    self.mcpu.assertLogContainsRegex("Failure in blockdev_getdimensions")

  def _ExecOpClusterRepairDiskSizes(self, node_data):
    # not specifying instances repairs all
    op = opcodes.OpClusterRepairDiskSizes()

    self.rpc.call_blockdev_getdimensions.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, node_data)

    return self.ExecOpCode(op)

  def testInvalidResultData(self):
    # Malformed dimension payloads must be skipped with a log message,
    # not crash the LU
    for data in [[], [None], ["invalid"], [("still", "invalid")]]:
      self.ResetMocks()

      self._SetUpInstanceSingleDisk()
      self._ExecOpClusterRepairDiskSizes(data)

      self.mcpu.assertLogContainsRegex("ignoring")

  def testCorrectSize(self):
    # A disk whose recorded size matches the reported 1 GiB needs no change
    self._SetUpInstanceSingleDisk()
    changed = self._ExecOpClusterRepairDiskSizes([(1024 * 1024 * 1024, None)])
    self.mcpu.assertLogIsEmpty()
    self.assertEqual(0, len(changed))

  def testWrongSize(self):
    # A mismatching reported size (512 MiB) must result in one repair
    self._SetUpInstanceSingleDisk()
    changed = self._ExecOpClusterRepairDiskSizes([(512 * 1024 * 1024, None)])
    self.assertEqual(1, len(changed))

  def testCorrectDRBD(self):
    # Same as testCorrectSize, but through a DRBD device
    self._SetUpInstanceSingleDisk(dev_type=constants.LD_DRBD8)
    changed = self._ExecOpClusterRepairDiskSizes([(1024 * 1024 * 1024, None)])
    self.mcpu.assertLogIsEmpty()
    self.assertEqual(0, len(changed))

  def testWrongDRBDChild(self):
    # A size mismatch in a DRBD child device must also be repaired
    (_, disk) = self._SetUpInstanceSingleDisk(dev_type=constants.LD_DRBD8)
    disk.children[0].size = 512
    changed = self._ExecOpClusterRepairDiskSizes([(1024 * 1024 * 1024, None)])
    self.assertEqual(1, len(changed))

  def testExclusiveStorageInvalidResultData(self):
    # With exclusive storage enabled, a missing spindles value (None)
    # must be reported
    self._SetUpInstanceSingleDisk()
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    self._ExecOpClusterRepairDiskSizes([(1024 * 1024 * 1024, None)])

    self.mcpu.assertLogContainsRegex(
      "did not return valid spindles information")

  def testExclusiveStorageCorrectSpindles(self):
    # Matching spindle count: nothing to change
    (_, disk) = self._SetUpInstanceSingleDisk()
    disk.spindles = 1
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    changed = self._ExecOpClusterRepairDiskSizes([(1024 * 1024 * 1024, 1)])
    self.assertEqual(0, len(changed))

  def testExclusiveStorageWrongSpindles(self):
    # Unset spindles vs. reported value of 1: one disk must be updated
    self._SetUpInstanceSingleDisk()
    self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
    changed = self._ExecOpClusterRepairDiskSizes([(1024 * 1024 * 1024, 1)])
    self.assertEqual(1, len(changed))
537 |
|
538 |
|
539 |
class TestLUClusterSetParams(CmdlibTestCase): |
540 |
UID_POOL = [(10, 1000)] |
541 |
|
542 |
def testUidPool(self): |
543 |
op = opcodes.OpClusterSetParams(uid_pool=self.UID_POOL)
|
544 |
self.ExecOpCode(op)
|
545 |
self.assertEqual(self.UID_POOL, self.cluster.uid_pool) |
546 |
|
547 |
def testAddUids(self): |
548 |
old_pool = [(1, 9)] |
549 |
self.cluster.uid_pool = list(old_pool) |
550 |
op = opcodes.OpClusterSetParams(add_uids=self.UID_POOL)
|
551 |
self.ExecOpCode(op)
|
552 |
self.assertEqual(set(self.UID_POOL + old_pool), |
553 |
set(self.cluster.uid_pool)) |
554 |
|
555 |
def testRemoveUids(self): |
556 |
additional_pool = [(1, 9)] |
557 |
self.cluster.uid_pool = self.UID_POOL + additional_pool |
558 |
op = opcodes.OpClusterSetParams(remove_uids=self.UID_POOL)
|
559 |
self.ExecOpCode(op)
|
560 |
self.assertEqual(additional_pool, self.cluster.uid_pool) |
561 |
|
562 |
def testMasterNetmask(self): |
563 |
op = opcodes.OpClusterSetParams(master_netmask=0xFFFF0000)
|
564 |
self.ExecOpCode(op)
|
565 |
self.assertEqual(0xFFFF0000, self.cluster.master_netmask) |
566 |
|
567 |
def testInvalidDiskparams(self): |
568 |
for diskparams in [{constants.DT_DISKLESS: {constants.LV_STRIPES: 0}}, |
569 |
{constants.DT_DRBD8: {constants.RBD_POOL: "pool"}}]:
|
570 |
self.ResetMocks()
|
571 |
op = opcodes.OpClusterSetParams(diskparams=diskparams) |
572 |
self.ExecOpCodeExpectOpPrereqError(op, "verify diskparams") |
573 |
|
574 |
def testValidDiskparams(self): |
575 |
diskparams = {constants.DT_RBD: {constants.RBD_POOL: "mock_pool"}}
|
576 |
op = opcodes.OpClusterSetParams(diskparams=diskparams) |
577 |
self.ExecOpCode(op)
|
578 |
self.assertEqual(diskparams[constants.DT_RBD],
|
579 |
self.cluster.diskparams[constants.DT_RBD])
|
580 |
|
581 |
def testMinimalDiskparams(self): |
582 |
diskparams = {constants.DT_RBD: {constants.RBD_POOL: "mock_pool"}}
|
583 |
self.cluster.diskparams = {}
|
584 |
op = opcodes.OpClusterSetParams(diskparams=diskparams) |
585 |
self.ExecOpCode(op)
|
586 |
self.assertEqual(diskparams, self.cluster.diskparams) |
587 |
|
588 |
def testUnsetDrbdHelperWithDrbdDisks(self): |
589 |
self.cfg.AddNewInstance(disks=[
|
590 |
self.cfg.CreateDisk(dev_type=constants.LD_DRBD8, create_nodes=True)]) |
591 |
op = opcodes.OpClusterSetParams(drbd_helper="")
|
592 |
self.ExecOpCodeExpectOpPrereqError(op, "Cannot disable drbd helper") |
593 |
|
594 |
def testFileStorageDir(self): |
595 |
op = opcodes.OpClusterSetParams(file_storage_dir="/random/path")
|
596 |
self.ExecOpCode(op)
|
597 |
|
598 |
def testSetFileStorageDirToCurrentValue(self): |
599 |
op = opcodes.OpClusterSetParams( |
600 |
file_storage_dir=self.cluster.file_storage_dir)
|
601 |
self.ExecOpCode(op)
|
602 |
|
603 |
self.mcpu.assertLogContainsRegex("file storage dir already set to value") |
604 |
|
605 |
def testValidDrbdHelper(self): |
606 |
node1 = self.cfg.AddNewNode()
|
607 |
node1.offline = True
|
608 |
self.rpc.call_drbd_helper.return_value = \
|
609 |
self.RpcResultsBuilder() \
|
610 |
.AddSuccessfulNode(self.master, "/bin/true") \ |
611 |
.AddOfflineNode(node1) \ |
612 |
.Build() |
613 |
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
|
614 |
self.ExecOpCode(op)
|
615 |
self.mcpu.assertLogContainsRegex("Not checking drbd helper on offline node") |
616 |
|
617 |
def testDrbdHelperFailingNode(self): |
618 |
self.rpc.call_drbd_helper.return_value = \
|
619 |
self.RpcResultsBuilder() \
|
620 |
.AddFailedNode(self.master) \
|
621 |
.Build() |
622 |
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
|
623 |
self.ExecOpCodeExpectOpPrereqError(op, "Error checking drbd helper") |
624 |
|
625 |
def testInvalidDrbdHelper(self): |
626 |
self.rpc.call_drbd_helper.return_value = \
|
627 |
self.RpcResultsBuilder() \
|
628 |
.AddSuccessfulNode(self.master, "/bin/false") \ |
629 |
.Build() |
630 |
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
|
631 |
self.ExecOpCodeExpectOpPrereqError(op, "drbd helper is /bin/false") |
632 |
|
633 |
def testDrbdHelperWithoutDrbdDiskTemplate(self): |
634 |
drbd_helper = "/bin/random_helper"
|
635 |
self.cluster.enabled_disk_templates = [constants.DT_DISKLESS]
|
636 |
self.rpc.call_drbd_helper.return_value = \
|
637 |
self.RpcResultsBuilder() \
|
638 |
.AddSuccessfulNode(self.master, drbd_helper) \
|
639 |
.Build() |
640 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
641 |
self.ExecOpCode(op)
|
642 |
|
643 |
self.mcpu.assertLogContainsRegex("but did not enable") |
644 |
|
645 |
def testResetDrbdHelper(self): |
646 |
drbd_helper = ""
|
647 |
self.cluster.enabled_disk_templates = [constants.DT_DISKLESS]
|
648 |
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper) |
649 |
self.ExecOpCode(op)
|
650 |
|
651 |
self.assertEqual(None, self.cluster.drbd_usermode_helper) |
652 |
|
653 |
def testBeparams(self): |
654 |
beparams = {constants.BE_VCPUS: 32}
|
655 |
op = opcodes.OpClusterSetParams(beparams=beparams) |
656 |
self.ExecOpCode(op)
|
657 |
self.assertEqual(32, self.cluster |
658 |
.beparams[constants.PP_DEFAULT][constants.BE_VCPUS]) |
659 |
|
660 |
def testNdparams(self): |
661 |
ndparams = {constants.ND_EXCLUSIVE_STORAGE: True}
|
662 |
op = opcodes.OpClusterSetParams(ndparams=ndparams) |
663 |
self.ExecOpCode(op)
|
664 |
self.assertEqual(True, self.cluster |
665 |
.ndparams[constants.ND_EXCLUSIVE_STORAGE]) |
666 |
|
667 |
def testNdparamsResetOobProgram(self): |
668 |
ndparams = {constants.ND_OOB_PROGRAM: ""}
|
669 |
op = opcodes.OpClusterSetParams(ndparams=ndparams) |
670 |
self.ExecOpCode(op)
|
671 |
self.assertEqual(constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM],
|
672 |
self.cluster.ndparams[constants.ND_OOB_PROGRAM])
|
673 |
|
674 |
def testHvState(self): |
675 |
hv_state = {constants.HT_FAKE: {constants.HVST_CPU_TOTAL: 8}}
|
676 |
op = opcodes.OpClusterSetParams(hv_state=hv_state) |
677 |
self.ExecOpCode(op)
|
678 |
self.assertEqual(8, self.cluster.hv_state_static |
679 |
[constants.HT_FAKE][constants.HVST_CPU_TOTAL]) |
680 |
|
681 |
def testDiskState(self): |
682 |
disk_state = { |
683 |
constants.LD_LV: { |
684 |
"mock_vg": {constants.DS_DISK_TOTAL: 10} |
685 |
} |
686 |
} |
687 |
op = opcodes.OpClusterSetParams(disk_state=disk_state) |
688 |
self.ExecOpCode(op)
|
689 |
self.assertEqual(10, self.cluster |
690 |
.disk_state_static[constants.LD_LV]["mock_vg"]
|
691 |
[constants.DS_DISK_TOTAL]) |
692 |
|
693 |
def testDefaultIPolicy(self): |
694 |
ipolicy = constants.IPOLICY_DEFAULTS |
695 |
op = opcodes.OpClusterSetParams(ipolicy=ipolicy) |
696 |
self.ExecOpCode(op)
|
697 |
|
698 |
def testIPolicyNewViolation(self): |
699 |
import ganeti.constants as C |
700 |
ipolicy = C.IPOLICY_DEFAULTS |
701 |
ipolicy[C.ISPECS_MINMAX][0][C.ISPECS_MIN][C.ISPEC_MEM_SIZE] = 128 |
702 |
ipolicy[C.ISPECS_MINMAX][0][C.ISPECS_MAX][C.ISPEC_MEM_SIZE] = 128 |
703 |
|
704 |
self.cfg.AddNewInstance(beparams={C.BE_MINMEM: 512, C.BE_MAXMEM: 512}) |
705 |
op = opcodes.OpClusterSetParams(ipolicy=ipolicy) |
706 |
self.ExecOpCode(op)
|
707 |
|
708 |
self.mcpu.assertLogContainsRegex("instances violate them") |
709 |
|
710 |
def testNicparamsNoInstance(self): |
711 |
nicparams = { |
712 |
constants.NIC_LINK: "mock_bridge"
|
713 |
} |
714 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
715 |
self.ExecOpCode(op)
|
716 |
|
717 |
self.assertEqual("mock_bridge", |
718 |
self.cluster.nicparams
|
719 |
[constants.PP_DEFAULT][constants.NIC_LINK]) |
720 |
|
721 |
def testNicparamsInvalidConf(self): |
722 |
nicparams = { |
723 |
constants.NIC_MODE: constants.NIC_MODE_BRIDGED, |
724 |
constants.NIC_LINK: ""
|
725 |
} |
726 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
727 |
self.ExecOpCodeExpectException(op, errors.ConfigurationError, "NIC link") |
728 |
|
729 |
def testNicparamsInvalidInstanceConf(self): |
730 |
nicparams = { |
731 |
constants.NIC_MODE: constants.NIC_MODE_BRIDGED, |
732 |
constants.NIC_LINK: "mock_bridge"
|
733 |
} |
734 |
self.cfg.AddNewInstance(nics=[
|
735 |
self.cfg.CreateNic(nicparams={constants.NIC_LINK: None})]) |
736 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
737 |
self.ExecOpCodeExpectOpPrereqError(op, "Missing bridged NIC link") |
738 |
|
739 |
def testNicparamsMissingIp(self): |
740 |
nicparams = { |
741 |
constants.NIC_MODE: constants.NIC_MODE_ROUTED |
742 |
} |
743 |
self.cfg.AddNewInstance()
|
744 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
745 |
self.ExecOpCodeExpectOpPrereqError(op, "routed NIC with no ip address") |
746 |
|
747 |
def testNicparamsWithInstance(self): |
748 |
nicparams = { |
749 |
constants.NIC_LINK: "mock_bridge"
|
750 |
} |
751 |
self.cfg.AddNewInstance()
|
752 |
op = opcodes.OpClusterSetParams(nicparams=nicparams) |
753 |
self.ExecOpCode(op)
|
754 |
|
755 |
def testDefaultHvparams(self): |
756 |
hvparams = constants.HVC_DEFAULTS |
757 |
op = opcodes.OpClusterSetParams(hvparams=hvparams) |
758 |
self.ExecOpCode(op)
|
759 |
|
760 |
self.assertEqual(hvparams, self.cluster.hvparams) |
761 |
|
762 |
def testMinimalHvparams(self): |
763 |
hvparams = { |
764 |
constants.HT_FAKE: { |
765 |
constants.HV_MIGRATION_MODE: constants.HT_MIGRATION_NONLIVE |
766 |
} |
767 |
} |
768 |
self.cluster.hvparams = {}
|
769 |
op = opcodes.OpClusterSetParams(hvparams=hvparams) |
770 |
self.ExecOpCode(op)
|
771 |
|
772 |
self.assertEqual(hvparams, self.cluster.hvparams) |
773 |
|
774 |
def testOsHvp(self): |
775 |
os_hvp = { |
776 |
"mocked_os": {
|
777 |
constants.HT_FAKE: { |
778 |
constants.HV_MIGRATION_MODE: constants.HT_MIGRATION_NONLIVE |
779 |
} |
780 |
}, |
781 |
"other_os": constants.HVC_DEFAULTS
|
782 |
} |
783 |
op = opcodes.OpClusterSetParams(os_hvp=os_hvp) |
784 |
self.ExecOpCode(op)
|
785 |
|
786 |
self.assertEqual(constants.HT_MIGRATION_NONLIVE,
|
787 |
self.cluster.os_hvp["mocked_os"][constants.HT_FAKE] |
788 |
[constants.HV_MIGRATION_MODE]) |
789 |
self.assertEqual(constants.HVC_DEFAULTS, self.cluster.os_hvp["other_os"]) |
790 |
|
791 |
def testRemoveOsHvp(self): |
792 |
os_hvp = {"mocked_os": {constants.HT_FAKE: None}} |
793 |
op = opcodes.OpClusterSetParams(os_hvp=os_hvp) |
794 |
self.ExecOpCode(op)
|
795 |
|
796 |
assert constants.HT_FAKE not in self.cluster.os_hvp["mocked_os"] |
797 |
|
798 |
def testDefaultOsHvp(self): |
799 |
os_hvp = {"mocked_os": constants.HVC_DEFAULTS.copy()}
|
800 |
self.cluster.os_hvp = {"mocked_os": {}} |
801 |
op = opcodes.OpClusterSetParams(os_hvp=os_hvp) |
802 |
self.ExecOpCode(op)
|
803 |
|
804 |
self.assertEqual(os_hvp, self.cluster.os_hvp) |
805 |
|
806 |
def testOsparams(self): |
807 |
osparams = { |
808 |
"mocked_os": {
|
809 |
"param1": "value1", |
810 |
"param2": None |
811 |
}, |
812 |
"other_os": {
|
813 |
"param1": None |
814 |
} |
815 |
} |
816 |
self.cluster.osparams = {"other_os": {"param1": "value1"}} |
817 |
op = opcodes.OpClusterSetParams(osparams=osparams) |
818 |
self.ExecOpCode(op)
|
819 |
|
820 |
self.assertEqual({"mocked_os": {"param1": "value1"}}, self.cluster.osparams) |
821 |
|
822 |
def testEnabledHypervisors(self): |
823 |
enabled_hypervisors = [constants.HT_XEN_HVM, constants.HT_XEN_PVM] |
824 |
op = opcodes.OpClusterSetParams(enabled_hypervisors=enabled_hypervisors) |
825 |
self.ExecOpCode(op)
|
826 |
|
827 |
self.assertEqual(enabled_hypervisors, self.cluster.enabled_hypervisors) |
828 |
|
829 |
def testEnabledHypervisorsWithoutHypervisorParams(self): |
830 |
enabled_hypervisors = [constants.HT_FAKE] |
831 |
self.cluster.hvparams = {}
|
832 |
op = opcodes.OpClusterSetParams(enabled_hypervisors=enabled_hypervisors) |
833 |
self.ExecOpCode(op)
|
834 |
|
835 |
self.assertEqual(enabled_hypervisors, self.cluster.enabled_hypervisors) |
836 |
self.assertEqual(constants.HVC_DEFAULTS[constants.HT_FAKE],
|
837 |
self.cluster.hvparams[constants.HT_FAKE])
|
838 |
|
839 |
@testutils.patch_object(utils, "FindFile") |
840 |
def testValidDefaultIallocator(self, find_file_mock): |
841 |
find_file_mock.return_value = "/random/path"
|
842 |
default_iallocator = "/random/path"
|
843 |
op = opcodes.OpClusterSetParams(default_iallocator=default_iallocator) |
844 |
self.ExecOpCode(op)
|
845 |
|
846 |
self.assertEqual(default_iallocator, self.cluster.default_iallocator) |
847 |
|
848 |
@testutils.patch_object(utils, "FindFile") |
849 |
def testInvalidDefaultIallocator(self, find_file_mock): |
850 |
find_file_mock.return_value = None
|
851 |
default_iallocator = "/random/path"
|
852 |
op = opcodes.OpClusterSetParams(default_iallocator=default_iallocator) |
853 |
self.ExecOpCodeExpectOpPrereqError(op, "Invalid default iallocator script") |
854 |
|
855 |
def testEnabledDiskTemplates(self): |
856 |
enabled_disk_templates = [constants.DT_DISKLESS, constants.DT_PLAIN] |
857 |
op = opcodes.OpClusterSetParams( |
858 |
enabled_disk_templates=enabled_disk_templates) |
859 |
self.ExecOpCode(op)
|
860 |
|
861 |
self.assertEqual(enabled_disk_templates,
|
862 |
self.cluster.enabled_disk_templates)
|
863 |
|
864 |
def testEnabledDiskTemplatesWithoutVgName(self): |
865 |
enabled_disk_templates = [constants.DT_PLAIN] |
866 |
self.cluster.volume_group_name = None |
867 |
op = opcodes.OpClusterSetParams( |
868 |
enabled_disk_templates=enabled_disk_templates) |
869 |
self.ExecOpCodeExpectOpPrereqError(op, "specify a volume group") |
870 |
|
871 |
def testDisableDiskTemplateWithExistingInstance(self): |
872 |
enabled_disk_templates = [constants.DT_DISKLESS] |
873 |
self.cfg.AddNewInstance(
|
874 |
disks=[self.cfg.CreateDisk(dev_type=constants.LD_LV)])
|
875 |
op = opcodes.OpClusterSetParams( |
876 |
enabled_disk_templates=enabled_disk_templates) |
877 |
self.ExecOpCodeExpectOpPrereqError(op, "Cannot disable disk template") |
878 |
|
879 |
def testVgNameNoLvmDiskTemplateEnabled(self): |
880 |
vg_name = "test_vg"
|
881 |
self.cluster.enabled_disk_templates = [constants.DT_DISKLESS]
|
882 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
883 |
self.ExecOpCode(op)
|
884 |
|
885 |
self.assertEqual(vg_name, self.cluster.volume_group_name) |
886 |
self.mcpu.assertLogContainsRegex("enable any lvm disk template") |
887 |
|
888 |
def testUnsetVgNameWithLvmDiskTemplateEnabled(self): |
889 |
vg_name = ""
|
890 |
self.cluster.enabled_disk_templates = [constants.DT_PLAIN]
|
891 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
892 |
self.ExecOpCodeExpectOpPrereqError(op, "Cannot unset volume group") |
893 |
|
894 |
def testUnsetVgNameWithLvmInstance(self): |
895 |
vg_name = ""
|
896 |
self.cfg.AddNewInstance(
|
897 |
disks=[self.cfg.CreateDisk(dev_type=constants.LD_LV)])
|
898 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
899 |
self.ExecOpCodeExpectOpPrereqError(op, "Cannot disable lvm storage") |
900 |
|
901 |
def testUnsetVgNameWithNoLvmDiskTemplateEnabled(self): |
902 |
vg_name = ""
|
903 |
self.cluster.enabled_disk_templates = [constants.DT_DISKLESS]
|
904 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
905 |
self.ExecOpCode(op)
|
906 |
|
907 |
self.assertEqual(None, self.cluster.volume_group_name) |
908 |
|
909 |
def testVgNameToOldName(self): |
910 |
vg_name = self.cluster.volume_group_name
|
911 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
912 |
self.ExecOpCode(op)
|
913 |
|
914 |
self.mcpu.assertLogContainsRegex("already in desired state") |
915 |
|
916 |
def testVgNameWithFailingNode(self): |
917 |
vg_name = "test_vg"
|
918 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
919 |
self.rpc.call_vg_list.return_value = \
|
920 |
self.RpcResultsBuilder() \
|
921 |
.AddFailedNode(self.master) \
|
922 |
.Build() |
923 |
self.ExecOpCode(op)
|
924 |
|
925 |
self.mcpu.assertLogContainsRegex("Error while gathering data on node") |
926 |
|
927 |
def testVgNameWithValidNode(self): |
928 |
vg_name = "test_vg"
|
929 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
930 |
self.rpc.call_vg_list.return_value = \
|
931 |
self.RpcResultsBuilder() \
|
932 |
.AddSuccessfulNode(self.master, {vg_name: 1024 * 1024}) \ |
933 |
.Build() |
934 |
self.ExecOpCode(op)
|
935 |
|
936 |
def testVgNameWithTooSmallNode(self): |
937 |
vg_name = "test_vg"
|
938 |
op = opcodes.OpClusterSetParams(vg_name=vg_name) |
939 |
self.rpc.call_vg_list.return_value = \
|
940 |
self.RpcResultsBuilder() \
|
941 |
.AddSuccessfulNode(self.master, {vg_name: 1}) \ |
942 |
.Build() |
943 |
self.ExecOpCodeExpectOpPrereqError(op, "too small") |
944 |
|
945 |
def testMiscParameters(self): |
946 |
op = opcodes.OpClusterSetParams(candidate_pool_size=123,
|
947 |
maintain_node_health=True,
|
948 |
modify_etc_hosts=True,
|
949 |
prealloc_wipe_disks=True,
|
950 |
reserved_lvs=["/dev/mock_lv"],
|
951 |
use_external_mip_script=True)
|
952 |
self.ExecOpCode(op)
|
953 |
|
954 |
self.mcpu.assertLogIsEmpty()
|
955 |
self.assertEqual(123, self.cluster.candidate_pool_size) |
956 |
self.assertEqual(True, self.cluster.maintain_node_health) |
957 |
self.assertEqual(True, self.cluster.modify_etc_hosts) |
958 |
self.assertEqual(True, self.cluster.prealloc_wipe_disks) |
959 |
self.assertEqual(["/dev/mock_lv"], self.cluster.reserved_lvs) |
960 |
self.assertEqual(True, self.cluster.use_external_mip_script) |
961 |
|
962 |
def testAddHiddenOs(self): |
963 |
self.cluster.hidden_os = ["hidden1", "hidden2"] |
964 |
op = opcodes.OpClusterSetParams(hidden_os=[(constants.DDM_ADD, "hidden2"),
|
965 |
(constants.DDM_ADD, "hidden3")])
|
966 |
self.ExecOpCode(op)
|
967 |
|
968 |
self.assertEqual(["hidden1", "hidden2", "hidden3"], self.cluster.hidden_os) |
969 |
self.mcpu.assertLogContainsRegex("OS hidden2 already") |
970 |
|
971 |
def testRemoveBlacklistedOs(self): |
972 |
self.cluster.blacklisted_os = ["blisted1", "blisted2"] |
973 |
op = opcodes.OpClusterSetParams(blacklisted_os=[ |
974 |
(constants.DDM_REMOVE, "blisted2"),
|
975 |
(constants.DDM_REMOVE, "blisted3")])
|
976 |
self.ExecOpCode(op)
|
977 |
|
978 |
self.assertEqual(["blisted1"], self.cluster.blacklisted_os) |
979 |
self.mcpu.assertLogContainsRegex("OS blisted3 not found") |
980 |
|
981 |
def testMasterNetdev(self): |
982 |
master_netdev = "test_dev"
|
983 |
op = opcodes.OpClusterSetParams(master_netdev=master_netdev) |
984 |
self.ExecOpCode(op)
|
985 |
|
986 |
self.assertEqual(master_netdev, self.cluster.master_netdev) |
987 |
|
988 |
def testMasterNetdevFailNoForce(self): |
989 |
master_netdev = "test_dev"
|
990 |
op = opcodes.OpClusterSetParams(master_netdev=master_netdev) |
991 |
self.rpc.call_node_deactivate_master_ip.return_value = \
|
992 |
self.RpcResultsBuilder() \
|
993 |
.CreateFailedNodeResult(self.master)
|
994 |
self.ExecOpCodeExpectOpExecError(op, "Could not disable the master ip") |
995 |
|
996 |
def testMasterNetdevFailForce(self): |
997 |
master_netdev = "test_dev"
|
998 |
op = opcodes.OpClusterSetParams(master_netdev=master_netdev, |
999 |
force=True)
|
1000 |
self.rpc.call_node_deactivate_master_ip.return_value = \
|
1001 |
self.RpcResultsBuilder() \
|
1002 |
.CreateFailedNodeResult(self.master)
|
1003 |
self.ExecOpCode(op)
|
1004 |
|
1005 |
self.mcpu.assertLogContainsRegex("Could not disable the master ip") |
1006 |
|
1007 |
|
1008 |
class TestLUClusterVerify(CmdlibTestCase):
  """Tests for the job-submitting cluster verify opcode."""

  def testVerifyAllGroups(self):
    """Verifying the whole cluster submits two jobs in this fixture."""
    result = self.ExecOpCode(opcodes.OpClusterVerify())

    self.assertEqual(2, len(result["jobs"]))

  def testVerifyDefaultGroups(self):
    """Restricting verification to one group submits exactly one job."""
    result = self.ExecOpCode(opcodes.OpClusterVerify(group_name="default"))

    self.assertEqual(1, len(result["jobs"]))
1022 |
class TestLUClusterVerifyConfig(CmdlibTestCase):
  """Tests for configuration-only cluster verification.

  Certificate loading/verification and file access are patched out so
  the LU's certificate checks always succeed; individual tests then
  corrupt the configuration and check the verification result.
  """

  def setUp(self):
    super(TestLUClusterVerifyConfig, self).setUp()

    self._load_cert_patcher = testutils \
      .patch_object(OpenSSL.crypto, "load_certificate")
    self._load_cert_mock = self._load_cert_patcher.start()
    self._verify_cert_patcher = testutils \
      .patch_object(utils, "VerifyX509Certificate")
    self._verify_cert_mock = self._verify_cert_patcher.start()
    self._read_file_patcher = testutils.patch_object(utils, "ReadFile")
    self._read_file_mock = self._read_file_patcher.start()
    self._can_read_patcher = testutils.patch_object(utils, "CanRead")
    self._can_read_mock = self._can_read_patcher.start()

    # Make all certificate-related checks pass; (None, "") means
    # "no error" for VerifyX509Certificate.
    self._can_read_mock.return_value = True
    self._read_file_mock.return_value = True
    self._verify_cert_mock.return_value = (None, "")
    self._load_cert_mock.return_value = True

  def tearDown(self):
    super(TestLUClusterVerifyConfig, self).tearDown()

    # Stop patchers in reverse order of starting them.
    self._can_read_patcher.stop()
    self._read_file_patcher.stop()
    self._verify_cert_patcher.stop()
    self._load_cert_patcher.stop()

  def testSuccessfulRun(self):
    """A consistent configuration verifies successfully."""
    self.cfg.AddNewInstance()
    op = opcodes.OpClusterVerifyConfig()
    result = self.ExecOpCode(op)

    self.assertTrue(result)

  def testDanglingNode(self):
    """A node in a non-existing group makes verification fail."""
    node = self.cfg.AddNewNode()
    self.cfg.AddNewInstance(primary_node=node)
    node.group = "invalid"
    op = opcodes.OpClusterVerifyConfig()
    result = self.ExecOpCode(op)

    # Raw string: "\(" in a plain literal is an invalid escape sequence
    # (DeprecationWarning since Python 3.6, an error in newer versions).
    self.mcpu.assertLogContainsRegex(
      r"following nodes \(and their instances\) belong to a non existing group")
    self.assertFalse(result)

  def testDanglingInstance(self):
    """An instance with a non-existing primary node fails verification."""
    inst = self.cfg.AddNewInstance()
    inst.primary_node = "invalid"
    op = opcodes.OpClusterVerifyConfig()
    result = self.ExecOpCode(op)

    self.mcpu.assertLogContainsRegex(
      "following instances have a non-existing primary-node")
    self.assertFalse(result)
1080 |
# Allow running this test module directly from the command line.
if __name__ == "__main__":
  testutils.GanetiTestProgram()