root / snf-cyclades-app / synnefo / logic / tests.py @ 7ab30015
History | View | Annotate | Download (29.5 kB)
1 |
# vim: set fileencoding=utf-8 :
|
---|---|
2 |
# Copyright 2012 GRNET S.A. All rights reserved.
|
3 |
#
|
4 |
# Redistribution and use in source and binary forms, with or without
|
5 |
# modification, are permitted provided that the following conditions
|
6 |
# are met:
|
7 |
#
|
8 |
# 1. Redistributions of source code must retain the above copyright
|
9 |
# notice, this list of conditions and the following disclaimer.
|
10 |
#
|
11 |
# 2. Redistributions in binary form must reproduce the above copyright
|
12 |
# notice, this list of conditions and the following disclaimer in the
|
13 |
# documentation and/or other materials provided with the distribution.
|
14 |
#
|
15 |
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
16 |
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
17 |
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
18 |
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
19 |
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
20 |
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
21 |
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
22 |
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
23 |
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
24 |
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
25 |
# SUCH DAMAGE.
|
26 |
#
|
27 |
# The views and conclusions contained in the software and documentation are
|
28 |
# those of the authors and should not be interpreted as representing official
|
29 |
# policies, either expressed or implied, of GRNET S.A.
|
30 |
|
31 |
# Provides automated tests for logic module
|
32 |
|
33 |
from random import randint |
34 |
|
35 |
from django.test import TestCase |
36 |
|
37 |
from synnefo.db.models import * |
38 |
from synnefo.db import models_factory as mfactory |
39 |
from synnefo.logic import reconciliation |
40 |
from synnefo.lib.utils import split_time |
41 |
from datetime import datetime |
42 |
from mock import patch |
43 |
from synnefo.api.util import allocate_resource |
44 |
from synnefo.logic.callbacks import (update_db, update_network, |
45 |
update_build_progress) |
46 |
from snf_django.utils.testing import mocked_quotaholder |
47 |
|
48 |
now = datetime.now |
49 |
from time import time |
50 |
import json |
51 |
|
52 |
## Test Callbacks
|
53 |
|
54 |
|
55 |
@patch('synnefo.lib.amqp.AMQPClient')
class UpdateDBTest(TestCase):
    """Tests for the `update_db` AMQP callback.

    The class-level patch replaces the AMQP client, so every test method
    receives the mock as `client` and asserts which of basic_ack /
    basic_nack / basic_reject the callback invoked on it.
    """

    def create_msg(self, **kwargs):
        """Create snf-ganeti-eventd message"""
        # Default fields of a well-formed 'ganeti-op-status' message;
        # callers override or extend them through **kwargs.
        msg = {'event_time': split_time(time())}
        msg['type'] = 'ganeti-op-status'
        msg['status'] = 'success'
        msg['jobId'] = 1
        msg['logmsg'] = 'Dummy Log'
        for key, val in kwargs.items():
            msg[key] = val
        # The callback expects the payload JSON-encoded under 'body'.
        message = {'body': json.dumps(msg)}
        return message

    def test_missing_attribute(self, client):
        # A body lacking the mandatory message fields is rejected.
        update_db(client, json.dumps({'body': {}}))
        self.assertTrue(client.basic_reject.called)

    def test_unhandled_exception(self, client):
        # A completely malformed delivery (no 'body' key) is rejected too.
        update_db(client, {})
        client.basic_reject.assert_called_once()

    def test_missing_instance(self, client):
        # Messages about unknown instances are acked (dropped silently).
        msg = self.create_msg(operation='OP_INSTANCE_STARTUP',
                              instance='foo')
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)

    def test_wrong_type(self, client):
        # Messages of a type this callback does not handle are nacked.
        msg = self.create_msg(type="WRONG_TYPE")
        update_db(client, msg)
        self.assertTrue(client.basic_nack.called)

    def test_old_msg(self, client):
        """A message older than the VM's backendtime must not change state."""
        from time import sleep
        from datetime import datetime
        old_time = time()
        sleep(0.01)  # ensure old_time precedes the VM's backendtime below
        new_time = datetime.fromtimestamp(time())
        vm = mfactory.VirtualMachineFactory(backendtime=new_time)
        vm.operstate = 'STOPPED'
        vm.save()
        msg = self.create_msg(operation='OP_INSTANCE_STARTUP',
                              event_time=split_time(old_time),
                              instance=vm.backend_vm_id)
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        # Stale message is acked, but operstate/backendtime stay untouched.
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEquals(db_vm.operstate, "STOPPED")
        self.assertEquals(db_vm.backendtime, new_time)

    def test_start(self, client):
        # Successful OP_INSTANCE_STARTUP moves the VM to STARTED.
        vm = mfactory.VirtualMachineFactory()
        msg = self.create_msg(operation='OP_INSTANCE_STARTUP',
                              instance=vm.backend_vm_id)
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'STARTED')

    def test_stop(self, client):
        # Successful OP_INSTANCE_SHUTDOWN moves the VM to STOPPED.
        vm = mfactory.VirtualMachineFactory()
        msg = self.create_msg(operation='OP_INSTANCE_SHUTDOWN',
                              instance=vm.backend_vm_id)
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'STOPPED')

    def test_reboot(self, client):
        # A successful reboot leaves the VM in the STARTED state.
        vm = mfactory.VirtualMachineFactory()
        msg = self.create_msg(operation='OP_INSTANCE_REBOOT',
                              instance=vm.backend_vm_id)
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'STARTED')

    def test_remove(self, client):
        vm = mfactory.VirtualMachineFactory()
        # Also create a NIC
        mfactory.NetworkInterfaceFactory(machine=vm)
        msg = self.create_msg(operation='OP_INSTANCE_REMOVE',
                              instance=vm.backend_vm_id)
        # Removal commits quota, hence the mocked quotaholder.
        with mocked_quotaholder():
            update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'DESTROYED')
        self.assertTrue(db_vm.deleted)
        # Check that nics are deleted
        self.assertFalse(db_vm.nics.all())

    def test_create(self, client):
        # Successful OP_INSTANCE_CREATE results in a STARTED VM.
        vm = mfactory.VirtualMachineFactory()
        msg = self.create_msg(operation='OP_INSTANCE_CREATE',
                              instance=vm.backend_vm_id)
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'STARTED')

    def test_create_error(self, client):
        """Test that error create sets vm to ERROR state"""
        vm = mfactory.VirtualMachineFactory()
        msg = self.create_msg(operation='OP_INSTANCE_CREATE',
                              instance=vm.backend_vm_id,
                              status='error')
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'ERROR')

    def test_remove_from_error(self, client):
        """Test that error removes delete error builds"""
        vm = mfactory.VirtualMachineFactory(operstate='ERROR')
        # Also create a NIC
        mfactory.NetworkInterfaceFactory(machine=vm)
        msg = self.create_msg(operation='OP_INSTANCE_REMOVE',
                              instance=vm.backend_vm_id)
        with mocked_quotaholder():
            update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'DESTROYED')
        self.assertTrue(db_vm.deleted)
        # Check that nics are deleted
        self.assertFalse(db_vm.nics.all())

    def test_other_error(self, client):
        """Test that other error messages do no affect the VM"""
        vm = mfactory.VirtualMachineFactory()
        msg = self.create_msg(operation='OP_INSTANCE_STARTUP',
                              instance=vm.backend_vm_id,
                              status='error')
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        # Neither operstate nor backendtime changed by the failed op.
        self.assertEqual(db_vm.operstate, vm.operstate)
        self.assertEqual(db_vm.backendtime, vm.backendtime)

    def test_resize_msg(self, client):
        """Exercise OP_INSTANCE_SET_PARAMS (resize) in all job phases."""
        vm = mfactory.VirtualMachineFactory()
        # Test empty beparams
        for status in ["success", "error"]:
            msg = self.create_msg(operation='OP_INSTANCE_SET_PARAMS',
                                  instance=vm.backend_vm_id,
                                  beparams={},
                                  status=status)
            # reset_mock so each iteration checks its own ack call.
            client.reset_mock()
            update_db(client, msg)
            self.assertTrue(client.basic_ack.called)
            db_vm = VirtualMachine.objects.get(id=vm.id)
            self.assertEqual(db_vm.operstate, vm.operstate)
        # Test intermediate states
        for status in ["queued", "waiting", "running"]:
            msg = self.create_msg(operation='OP_INSTANCE_SET_PARAMS',
                                  instance=vm.backend_vm_id,
                                  beparams={"vcpus": 4, "minmem": 2048,
                                            "maxmem": 2048},
                                  status=status)
            client.reset_mock()
            update_db(client, msg)
            self.assertTrue(client.basic_ack.called)
            db_vm = VirtualMachine.objects.get(id=vm.id)
            # An in-flight resize job puts the VM in the RESIZE state.
            self.assertEqual(db_vm.operstate, "RESIZE")
        # Test operstate after error
        msg = self.create_msg(operation='OP_INSTANCE_SET_PARAMS',
                              instance=vm.backend_vm_id,
                              beparams={"vcpus": 4},
                              status="error")
        client.reset_mock()
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        # A failed resize falls back to STOPPED.
        self.assertEqual(db_vm.operstate, "STOPPED")
        # Test success
        f1 = mfactory.FlavorFactory(cpu=4, ram=1024, disk_template="drbd",
                                    disk=1024)
        vm.flavor = f1
        vm.save()
        # f2 matches the beparams below, so it should become the new flavor.
        f2 = mfactory.FlavorFactory(cpu=8, ram=2048, disk_template="drbd",
                                    disk=1024)
        msg = self.create_msg(operation='OP_INSTANCE_SET_PARAMS',
                              instance=vm.backend_vm_id,
                              beparams={"vcpus": 8, "minmem": 2048,
                                        "maxmem": 2048},
                              status="success")
        client.reset_mock()
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, "STOPPED")
        self.assertEqual(db_vm.flavor, f2)
        # beparams matching no existing flavor: message is rejected.
        msg = self.create_msg(operation='OP_INSTANCE_SET_PARAMS',
                              instance=vm.backend_vm_id,
                              beparams={"vcpus": 100, "minmem": 2048,
                                        "maxmem": 2048},
                              status="success")
        client.reset_mock()
        update_db(client, msg)
        self.assertTrue(client.basic_reject.called)
257 |
|
258 |
|
259 |
@patch('synnefo.lib.amqp.AMQPClient')
class UpdateNetTest(TestCase):
    """Tests for NIC updates delivered to the `update_db` callback.

    The mocked AMQP client is injected into every test as `client`; the
    assertions inspect which of basic_ack / basic_nack / basic_reject
    the callback ended up calling.
    """

    def create_msg(self, **kwargs):
        """Create snf-ganeti-hook message"""
        payload = {'event_time': split_time(time()),
                   'type': 'ganeti-op-status',
                   'operation': 'OP_INSTANCE_SET_PARAMS',
                   'status': 'success',
                   'jobId': 1,
                   'logmsg': 'Dummy Log'}
        # Caller-supplied fields override the defaults above.
        payload.update(kwargs)
        return {'body': json.dumps(payload)}

    def test_missing_attribute(self, client):
        # A body lacking the mandatory message fields is rejected.
        update_db(client, json.dumps({'body': {}}))
        self.assertTrue(client.basic_reject.called)

    def test_unhandled_exception(self, client):
        # A completely malformed delivery is rejected as well.
        update_db(client, {})
        client.basic_reject.assert_called_once()

    def test_wrong_type(self, client):
        # Unknown message types are nacked.
        bad = self.create_msg(type="WRONG_TYPE")
        update_db(client, bad)
        self.assertTrue(client.basic_nack.called)

    def test_missing_instance(self, client):
        # Messages about unknown instances are acked (dropped).
        unknown = self.create_msg(operation='OP_INSTANCE_STARTUP',
                                  instance='foo')
        update_db(client, unknown)
        self.assertTrue(client.basic_ack.called)

    def test_no_nics(self, client):
        # A message carrying an empty NIC list removes all NICs.
        server = mfactory.VirtualMachineFactory(operstate='ERROR')
        for _ in range(3):
            mfactory.NetworkInterfaceFactory(machine=server)
        self.assertEqual(len(server.nics.all()), 3)
        message = self.create_msg(nics=[],
                                  instance=server.backend_vm_id)
        update_db(client, message)
        self.assertTrue(client.basic_ack.called)
        refreshed = VirtualMachine.objects.get(id=server.id)
        self.assertEqual(len(refreshed.nics.all()), 0)

    def test_empty_nic(self, client):
        # A NIC without ip/mac gets empty fields; the firewall profile
        # depends on whether the network is public.
        server = mfactory.VirtualMachineFactory(operstate='ERROR')
        for is_public in (True, False):
            network = mfactory.NetworkFactory(public=is_public)
            message = self.create_msg(
                nics=[{'network': network.backend_id}],
                instance=server.backend_vm_id)
            update_db(client, message)
            self.assertTrue(client.basic_ack.called)
            refreshed = VirtualMachine.objects.get(id=server.id)
            refreshed_nics = refreshed.nics.all()
            self.assertEqual(len(refreshed_nics), 1)
            nic = refreshed_nics[0]
            self.assertEqual(nic.index, 0)
            self.assertEqual(nic.ipv4, '')
            self.assertEqual(nic.ipv6, '')
            self.assertEqual(nic.mac, '')
            if is_public:
                self.assertEqual(nic.firewall_profile,
                                 settings.DEFAULT_FIREWALL_PROFILE)
            else:
                self.assertEqual(nic.firewall_profile, '')

    def test_full_nic(self, client):
        # A fully specified NIC is stored verbatim and its address is
        # marked as used in the network's IP pool.
        server = mfactory.VirtualMachineFactory(operstate='ERROR')
        network = mfactory.NetworkFactory(subnet='10.0.0.0/24')
        pool = network.get_pool()
        self.assertTrue(pool.is_available('10.0.0.22'))
        pool.save()
        message = self.create_msg(
            nics=[{'network': network.backend_id,
                   'ip': '10.0.0.22',
                   'mac': 'aa:bb:cc:00:11:22'}],
            instance=server.backend_vm_id)
        update_db(client, message)
        self.assertTrue(client.basic_ack.called)
        refreshed = VirtualMachine.objects.get(id=server.id)
        refreshed_nics = refreshed.nics.all()
        self.assertEqual(len(refreshed_nics), 1)
        nic = refreshed_nics[0]
        self.assertEqual(nic.index, 0)
        self.assertEqual(nic.ipv4, '10.0.0.22')
        self.assertEqual(nic.ipv6, '')
        self.assertEqual(nic.mac, 'aa:bb:cc:00:11:22')
        pool = network.get_pool()
        self.assertFalse(pool.is_available('10.0.0.22'))
        pool.save()
349 |
|
350 |
|
351 |
@patch('synnefo.lib.amqp.AMQPClient')
class UpdateNetworkTest(TestCase):
    """Tests for the `update_network` AMQP callback.

    The class-level patch injects the mocked AMQP client as `client`;
    each test asserts which of basic_ack / basic_nack / basic_reject
    the callback invoked on it.
    """

    def create_msg(self, **kwargs):
        """Create snf-ganeti-eventd message"""
        # Default fields of a well-formed 'ganeti-network-status'
        # message; callers override/extend them through **kwargs.
        msg = {'event_time': split_time(time())}
        msg['type'] = 'ganeti-network-status'
        msg['status'] = 'success'
        msg['jobId'] = 1
        msg['logmsg'] = 'Dummy Log'
        for key, val in kwargs.items():
            msg[key] = val
        # The callback expects the payload JSON-encoded under 'body'.
        message = {'body': json.dumps(msg)}
        return message

    def test_missing_attribute(self, client):
        # A body lacking the mandatory message fields is rejected.
        update_network(client, json.dumps({'body': {}}))
        self.assertTrue(client.basic_reject.called)

    def test_unhandled_exception(self, client):
        # A completely malformed delivery is rejected as well.
        update_network(client, {})
        client.basic_reject.assert_called_once()

    def test_wrong_type(self, client):
        # Unknown message types are nacked.
        msg = self.create_msg(type="WRONG_TYPE")
        update_network(client, msg)
        self.assertTrue(client.basic_nack.called)

    def test_missing_network(self, client):
        # Messages about unknown networks are acked (dropped).
        msg = self.create_msg(operation='OP_NETWORK_CREATE',
                              network='foo')
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)

    def test_create(self, client):
        # Two backend networks for the same network: a CONNECT from each
        # backend must leave both the backend network and the network
        # itself ACTIVE.
        back_network = mfactory.BackendNetworkFactory(operstate='PENDING')
        net = back_network.network
        net.state = 'ACTIVE'
        net.save()
        back1 = back_network.backend

        back_network2 = mfactory.BackendNetworkFactory(operstate='PENDING',
                                                       network=net)
        back2 = back_network2.backend
        # Message from first backend network
        msg = self.create_msg(operation='OP_NETWORK_CONNECT',
                              network=net.backend_id,
                              cluster=back1.clustername)
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)

        back_net = BackendNetwork.objects.get(id=back_network.id)
        self.assertEqual(back_net.operstate, 'ACTIVE')
        db_net = Network.objects.get(id=net.id)
        self.assertEqual(db_net.state, 'ACTIVE')
        # msg from second backend network
        msg = self.create_msg(operation='OP_NETWORK_CONNECT',
                              network=net.backend_id,
                              cluster=back2.clustername)
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)

        db_net = Network.objects.get(id=net.id)
        self.assertEqual(db_net.state, 'ACTIVE')
        back_net = BackendNetwork.objects.get(id=back_network.id)
        self.assertEqual(back_net.operstate, 'ACTIVE')

    def test_create_offline_backend(self, client):
        """Test network creation when a backend is offline"""
        net = mfactory.NetworkFactory(state='ACTIVE')
        bn1 = mfactory.BackendNetworkFactory(network=net)
        bn2 = mfactory.BackendNetworkFactory(network=net,
                                             backend__offline=True)
        msg = self.create_msg(operation='OP_NETWORK_CONNECT',
                              network=net.backend_id,
                              cluster=bn1.backend.clustername)
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)
        # The offline backend must not block the network from going ACTIVE.
        new_net = Network.objects.get(id=net.id)
        self.assertEqual(new_net.state, 'ACTIVE')

    def test_disconnect(self, client):
        # Disconnecting one of two backend networks sets that backend
        # network to PENDING while the network itself stays ACTIVE.
        bn1 = mfactory.BackendNetworkFactory(operstate='ACTIVE')
        net1 = bn1.network
        # NOTE: the original assigned net1.state = 'ACTIVE' twice in a
        # row; the redundant statement has been dropped.
        net1.state = 'ACTIVE'
        net1.save()
        bn2 = mfactory.BackendNetworkFactory(operstate='ACTIVE',
                                             network=net1)
        msg = self.create_msg(operation='OP_NETWORK_DISCONNECT',
                              network=net1.backend_id,
                              cluster=bn2.backend.clustername)
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)
        self.assertEqual(Network.objects.get(id=net1.id).state, 'ACTIVE')
        self.assertEqual(BackendNetwork.objects.get(id=bn2.id).operstate,
                         'PENDING')

    def test_remove(self, client):
        """Removal must mark the network DELETED and release its
        bridge/mac_prefix back to the pools, for every flavor and for
        any previous backend-network state."""
        mfactory.MacPrefixPoolTableFactory()
        mfactory.BridgePoolTableFactory()
        bn = mfactory.BackendNetworkFactory(operstate='ACTIVE')
        for old_state in ['success', 'canceled', 'error']:
            for flavor in Network.FLAVORS.keys():
                bn.operstate = old_state
                bn.save()
                net = bn.network
                net.state = 'ACTIVE'
                net.flavor = flavor
                # These flavors hold a pooled resource that removal
                # must give back.
                if flavor == 'PHYSICAL_VLAN':
                    net.link = allocate_resource('bridge')
                if flavor == 'MAC_FILTERED':
                    net.mac_prefix = allocate_resource('mac_prefix')
                net.save()
                msg = self.create_msg(operation='OP_NETWORK_REMOVE',
                                      network=net.backend_id,
                                      cluster=bn.backend.clustername)
                # Removal commits quota, hence the mocked quotaholder.
                with mocked_quotaholder():
                    update_network(client, msg)
                self.assertTrue(client.basic_ack.called)
                db_bnet = BackendNetwork.objects.get(id=bn.id)
                self.assertEqual(db_bnet.operstate, 'DELETED')
                db_net = Network.objects.get(id=net.id)
                self.assertEqual(db_net.state, 'DELETED', flavor)
                self.assertTrue(db_net.deleted)
                if flavor == 'PHYSICAL_VLAN':
                    pool = BridgePoolTable.get_pool()
                    self.assertTrue(pool.is_available(net.link))
                if flavor == 'MAC_FILTERED':
                    pool = MacPrefixPoolTable.get_pool()
                    self.assertTrue(pool.is_available(net.mac_prefix))

    def test_remove_offline_backend(self, client):
        """Test network removing when a backend is offline"""
        mfactory.BridgePoolTableFactory()
        net = mfactory.NetworkFactory(flavor='PHYSICAL_VLAN',
                                      state='ACTIVE',
                                      link='prv12')
        bn1 = mfactory.BackendNetworkFactory(network=net)
        mfactory.BackendNetworkFactory(network=net,
                                       operstate="ACTIVE",
                                       backend__offline=True)
        msg = self.create_msg(operation='OP_NETWORK_REMOVE',
                              network=net.backend_id,
                              cluster=bn1.backend.clustername)
        with mocked_quotaholder():
            update_network(client, msg)
        self.assertTrue(client.basic_ack.called)
        # The network on the offline backend still exists, so the
        # network must not be marked deleted.
        new_net = Network.objects.get(id=net.id)
        self.assertEqual(new_net.state, 'ACTIVE')
        self.assertFalse(new_net.deleted)

    def test_error_opcode(self, client):
        """Failed opcodes (other than ADD/REMOVE) must leave both the
        backend network and the network state untouched."""
        mfactory.MacPrefixPoolTableFactory()
        mfactory.BridgePoolTableFactory()
        for state, _ in Network.OPER_STATES:
            bn = mfactory.BackendNetworkFactory(operstate="ACTIVE")
            bn.operstate = state
            bn.save()
            network = bn.network
            network.state = state
            network.save()
            for opcode, _ in BackendNetwork.BACKEND_OPCODES:
                if opcode in ['OP_NETWORK_REMOVE', 'OP_NETWORK_ADD']:
                    continue
                msg = self.create_msg(operation=opcode,
                                      network=bn.network.backend_id,
                                      status='error',
                                      add_reserved_ips=[],
                                      remove_reserved_ips=[],
                                      cluster=bn.backend.clustername)
                with mocked_quotaholder():
                    update_network(client, msg)
                self.assertTrue(client.basic_ack.called)
                db_bnet = BackendNetwork.objects.get(id=bn.id)
                self.assertEqual(bn.operstate, db_bnet.operstate)
                self.assertEqual(bn.network.state, db_bnet.network.state)

    def test_ips(self, client):
        # OP_NETWORK_SET_PARAMS reserves and releases externally
        # reserved IPs in the network's pool.
        network = mfactory.NetworkFactory(subnet='10.0.0.0/24')
        bn = mfactory.BackendNetworkFactory(network=network)
        msg = self.create_msg(operation='OP_NETWORK_SET_PARAMS',
                              network=network.backend_id,
                              cluster=bn.backend.clustername,
                              status='success',
                              add_reserved_ips=['10.0.0.10', '10.0.0.20'],
                              remove_reserved_ips=[])
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)
        pool = network.get_pool()
        self.assertTrue(pool.is_reserved('10.0.0.10'))
        self.assertTrue(pool.is_reserved('10.0.0.20'))
        pool.save()
        # Release them
        msg = self.create_msg(operation='OP_NETWORK_SET_PARAMS',
                              network=network.backend_id,
                              cluster=bn.backend.clustername,
                              add_reserved_ips=[],
                              remove_reserved_ips=['10.0.0.10', '10.0.0.20'])
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)
        pool = network.get_pool()
        self.assertFalse(pool.is_reserved('10.0.0.10'))
        self.assertFalse(pool.is_reserved('10.0.0.20'))
555 |
|
556 |
|
557 |
@patch('synnefo.lib.amqp.AMQPClient')
class UpdateBuildProgressTest(TestCase):
    """Tests for the `update_build_progress` AMQP callback.

    The mocked AMQP client arrives as `client` in every test method.
    """

    def setUp(self):
        self.vm = mfactory.VirtualMachineFactory()

    def get_db_vm(self):
        """Re-fetch the test VM from the database."""
        return VirtualMachine.objects.get(id=self.vm.id)

    def create_msg(self, **kwargs):
        """Create snf-progress-monitor message"""
        payload = {'event_time': split_time(time()),
                   'type': 'image-copy-progress',
                   'progress': 0}
        # Caller-supplied fields override the defaults above.
        payload.update(kwargs)
        return {'body': json.dumps(payload)}

    def test_missing_attribute(self, client):
        # A body lacking the mandatory message fields is rejected.
        update_build_progress(client, json.dumps({'body': {}}))
        self.assertTrue(client.basic_reject.called)

    def test_unhandled_exception(self, client):
        # A completely malformed delivery is rejected as well.
        update_build_progress(client, {})
        client.basic_reject.assert_called_once()

    def test_missing_instance(self, client):
        # Messages about unknown instances are acked (dropped).
        unknown = self.create_msg(instance='foo')
        update_build_progress(client, unknown)
        self.assertTrue(client.basic_ack.called)

    def test_wrong_type(self, client):
        # Unknown message types are nacked.
        bad = self.create_msg(type="WRONG_TYPE")
        update_build_progress(client, bad)
        self.assertTrue(client.basic_nack.called)

    def test_progress_update(self, client):
        # A valid progress value is stored on the VM.
        expected = randint(10, 100)
        message = self.create_msg(progress=expected,
                                  instance=self.vm.backend_vm_id)
        update_build_progress(client, message)
        self.assertTrue(client.basic_ack.called)
        self.assertEqual(self.get_db_vm().buildpercentage, expected)

    def test_invalid_value(self, client):
        # Zero, negative or non-numeric progress leaves the stored
        # percentage unchanged.
        before = self.vm.buildpercentage
        for bogus in (0, -1, 'a'):
            message = self.create_msg(progress=bogus,
                                      instance=self.vm.backend_vm_id)
            update_build_progress(client, message)
            self.assertTrue(client.basic_ack.called)
            self.assertEqual(self.get_db_vm().buildpercentage, before)
|
611 |
|
612 |
|
613 |
from synnefo.logic.reconciliation import VMState |
614 |
class ReconciliationTest(TestCase):
    """Tests for the reconciliation helpers that compare the DB view of
    servers with the Ganeti view (dicts keyed by server id)."""

    def get_vm(self, operstate, deleted=False):
        """Create and return a VM (cpu=2, ram=1024) in `operstate`.

        Keyword arguments:
        deleted -- create the VM already marked as deleted
        """
        flavor = mfactory.FlavorFactory(cpu=2, ram=1024)
        vm = mfactory.VirtualMachineFactory(deleted=deleted, flavor=flavor)
        vm.operstate = operstate
        vm.save()
        return vm

    def test_get_servers_from_db(self):
        """Test getting a dictionary from each server to its operstate"""
        vm1 = self.get_vm('STARTED')
        # Deleted servers must not appear in the result.
        self.get_vm('DESTROYED', deleted=True)
        vm3 = self.get_vm('STOPPED')
        self.assertEquals(reconciliation.get_servers_from_db(),
                          {vm1.id: VMState(state='STARTED', cpu=2, ram=1024,
                                           nics=[]),
                           vm3.id: VMState(state='STOPPED', cpu=2, ram=1024,
                                           nics=[])})

    def test_stale_servers_in_db(self):
        """Test discovery of stale entries in DB"""
        # Servers present in the DB (D) but absent from Ganeti (G) are
        # stale, except BUILDing ones which may not exist in Ganeti yet.
        D = {1: None, 2: 'None', 3: None, 30000: 'BUILD',
             30002: 'None'}
        G = {1: True, 3: True, 30000: True}
        self.assertEquals(reconciliation.stale_servers_in_db(D, G),
                          set([2, 30002]))

    @patch("synnefo.db.models.get_rapi_client")
    def test_stale_building_vm(self, client):
        """A BUILDing VM is only stale once Ganeti no longer knows its
        creation job or instance."""
        vm = mfactory.VirtualMachineFactory()
        vm.state = 'BUILD'
        vm.backendjobid = 42
        vm.save()
        D = {vm.id: 'BUILD'}
        G = {}
        # While the creation job is still in flight, the VM is not stale.
        for status in ['queued', 'waiting', 'running']:
            client.return_value.GetJobStatus.return_value = {'status': status}
            self.assertEqual(reconciliation.stale_servers_in_db(D, G),
                             set([]))
            client.return_value.GetJobStatus\
                               .assert_called_once_with(vm.backendjobid)
            client.reset_mock()
        # Once the job has finished, the instance itself is looked up.
        for status in ['success', 'error', 'canceled']:
            client.return_value.GetJobStatus.return_value = {'status': status}
            self.assertEqual(reconciliation.stale_servers_in_db(D, G),
                             set([]))
            client.return_value.GetInstance\
                               .assert_called_once_with(vm.backend_vm_id)
            client.return_value.GetJobStatus\
                               .assert_called_once_with(vm.backendjobid)
            client.reset_mock()
        # If Ganeti does not know the job at all, the VM is stale.
        from synnefo.logic.rapi import GanetiApiError
        client.return_value.GetJobStatus.side_effect = GanetiApiError('Foo')
        self.assertEqual(reconciliation.stale_servers_in_db(D, G),
                         set([vm.id]))

    def test_orphan_instances_in_ganeti(self):
        """Test discovery of orphan instances in Ganeti, without a DB entry"""
        G = {1: True, 2: False, 3: False, 4: True, 50: True}
        D = {1: True, 3: False}
        self.assertEquals(reconciliation.orphan_instances_in_ganeti(D, G),
                          set([2, 4, 50]))

    def test_unsynced_operstate(self):
        """Test discovery of unsynced operstate between the DB and Ganeti"""
        mkstate = lambda state: VMState(state=state, cpu=1, ram=1024, nics=[])
        vm1 = self.get_vm("STARTED")
        vm2 = self.get_vm("STARTED")
        vm3 = self.get_vm("BUILD")
        vm4 = self.get_vm("STARTED")
        vm5 = self.get_vm("BUILD")

        # D is keyed by the auto-assigned ids of vm1..vm5 (1..5) plus a
        # nonexistent 50 entry.
        D = {1: mkstate("STARTED"), 2: mkstate("STARTED"), 3: mkstate("BUILD"),
             4: mkstate("STARTED"), 50: mkstate("BUILD")}
        # NOTE(review): the original literal listed vm4.id twice; the
        # first (True) entry was dead, so only the False one is kept.
        G = {vm1.id: mkstate(True), vm2.id: mkstate(False),
             vm4.id: mkstate(False), vm5.id: mkstate(False)}
        self.assertEquals(reconciliation.unsynced_operstate(D, G),
                          set([(vm2.id, "STARTED", False),
                               (vm4.id, "STARTED", False)]))
694 |
|
695 |
from synnefo.logic.test.rapi_pool_tests import * |
696 |
from synnefo.logic.test.utils_tests import * |