root / snf-cyclades-app / synnefo / logic / tests.py @ 72dea98f
History | View | Annotate | Download (36.2 kB)
1 |
# vim: set fileencoding=utf-8 :
|
---|---|
2 |
# Copyright 2012 GRNET S.A. All rights reserved.
|
3 |
#
|
4 |
# Redistribution and use in source and binary forms, with or without
|
5 |
# modification, are permitted provided that the following conditions
|
6 |
# are met:
|
7 |
#
|
8 |
# 1. Redistributions of source code must retain the above copyright
|
9 |
# notice, this list of conditions and the following disclaimer.
|
10 |
#
|
11 |
# 2. Redistributions in binary form must reproduce the above copyright
|
12 |
# notice, this list of conditions and the following disclaimer in the
|
13 |
# documentation and/or other materials provided with the distribution.
|
14 |
#
|
15 |
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
16 |
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
17 |
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
18 |
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
19 |
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
20 |
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
21 |
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
22 |
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
23 |
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
24 |
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
25 |
# SUCH DAMAGE.
|
26 |
#
|
27 |
# The views and conclusions contained in the software and documentation are
|
28 |
# those of the authors and should not be interpreted as representing official
|
29 |
# policies, either expressed or implied, of GRNET S.A.
|
30 |
|
31 |
# Provides automated tests for logic module
|
32 |
|
33 |
from random import randint |
34 |
|
35 |
from django.test import TestCase |
36 |
|
37 |
from snf_django.lib.api import faults |
38 |
from synnefo.db.models import * |
39 |
from synnefo.db import models_factory as mfactory |
40 |
from synnefo.logic import reconciliation, servers |
41 |
from synnefo.lib.utils import split_time |
42 |
from datetime import datetime |
43 |
from mock import patch |
44 |
from synnefo.api.util import allocate_resource |
45 |
from synnefo.logic.callbacks import (update_db, update_network, |
46 |
update_build_progress) |
47 |
from snf_django.utils.testing import mocked_quotaholder |
48 |
|
49 |
now = datetime.now |
50 |
from time import time |
51 |
import json |
52 |
|
53 |
|
54 |
@patch("synnefo.logic.rapi_pool.GanetiRapiClient") |
55 |
class ServerCommandTest(TestCase): |
56 |
def test_pending_task(self, mrapi): |
57 |
vm = mfactory.VirtualMachineFactory(task="REBOOT", task_job_id=1) |
58 |
self.assertRaises(faults.BadRequest, servers.start, vm)
|
59 |
vm = mfactory.VirtualMachineFactory(task="BUILD", task_job_id=1) |
60 |
self.assertRaises(faults.BuildInProgress, servers.start, vm)
|
61 |
# Assert always succeeds
|
62 |
vm = mfactory.VirtualMachineFactory(task="BUILD", task_job_id=1) |
63 |
mrapi().DeleteInstance.return_value = 1
|
64 |
with mocked_quotaholder():
|
65 |
servers.destroy(vm) |
66 |
vm = mfactory.VirtualMachineFactory(task="REBOOT", task_job_id=1) |
67 |
with mocked_quotaholder():
|
68 |
servers.destroy(vm) |
69 |
|
70 |
def test_deleted_vm(self, mrapi): |
71 |
vm = mfactory.VirtualMachineFactory(deleted=True)
|
72 |
self.assertRaises(faults.BadRequest, servers.start, vm)
|
73 |
|
74 |
def test_invalid_operstate_for_action(self, mrapi): |
75 |
vm = mfactory.VirtualMachineFactory(operstate="STARTED")
|
76 |
self.assertRaises(faults.BadRequest, servers.start, vm)
|
77 |
vm = mfactory.VirtualMachineFactory(operstate="STOPPED")
|
78 |
self.assertRaises(faults.BadRequest, servers.stop, vm)
|
79 |
vm = mfactory.VirtualMachineFactory(operstate="STARTED")
|
80 |
self.assertRaises(faults.BadRequest, servers.resize, vm)
|
81 |
vm = mfactory.VirtualMachineFactory(operstate="STOPPED")
|
82 |
self.assertRaises(faults.BadRequest, servers.stop, vm)
|
83 |
#test valid
|
84 |
mrapi().StartupInstance.return_value = 1
|
85 |
with mocked_quotaholder():
|
86 |
servers.start(vm) |
87 |
vm.task = None
|
88 |
vm.task_job_id = None
|
89 |
vm.save() |
90 |
mrapi().RebootInstance.return_value = 1
|
91 |
with mocked_quotaholder():
|
92 |
servers.reboot(vm, "HARD")
|
93 |
|
94 |
def test_commission(self, mrapi): |
95 |
vm = mfactory.VirtualMachineFactory(operstate="STOPPED")
|
96 |
# Still pending
|
97 |
vm.serial = mfactory.QuotaHolderSerialFactory(serial=200,
|
98 |
resolved=False,
|
99 |
pending=True)
|
100 |
serial = vm.serial |
101 |
mrapi().StartupInstance.return_value = 1
|
102 |
with mocked_quotaholder() as m: |
103 |
servers.start(vm) |
104 |
m.resolve_commissions.assert_called_once_with('', [],
|
105 |
[serial.serial]) |
106 |
self.assertTrue(m.issue_one_commission.called)
|
107 |
# Not pending, rejct
|
108 |
vm.task = None
|
109 |
vm.serial = mfactory.QuotaHolderSerialFactory(serial=400,
|
110 |
resolved=False,
|
111 |
pending=False,
|
112 |
accept=False)
|
113 |
serial = vm.serial |
114 |
mrapi().StartupInstance.return_value = 1
|
115 |
with mocked_quotaholder() as m: |
116 |
servers.start(vm) |
117 |
m.resolve_commissions.assert_called_once_with('', [],
|
118 |
[serial.serial]) |
119 |
self.assertTrue(m.issue_one_commission.called)
|
120 |
# Not pending, accept
|
121 |
vm.task = None
|
122 |
vm.serial = mfactory.QuotaHolderSerialFactory(serial=600,
|
123 |
resolved=False,
|
124 |
pending=False,
|
125 |
accept=True)
|
126 |
serial = vm.serial |
127 |
mrapi().StartupInstance.return_value = 1
|
128 |
with mocked_quotaholder() as m: |
129 |
servers.start(vm) |
130 |
m.resolve_commissions.assert_called_once_with('', [serial.serial],
|
131 |
[]) |
132 |
self.assertTrue(m.issue_one_commission.called)
|
133 |
|
134 |
mrapi().StartupInstance.side_effect = ValueError
|
135 |
vm.task = None
|
136 |
vm.serial = None
|
137 |
# Test reject if Ganeti erro
|
138 |
with mocked_quotaholder() as m: |
139 |
try:
|
140 |
servers.start(vm) |
141 |
except:
|
142 |
m.resolve_commissions.assert_called_once_with('', [],
|
143 |
[vm.serial.serial]) |
144 |
|
145 |
def test_task_after(self, mrapi): |
146 |
return
|
147 |
vm = mfactory.VirtualMachineFactory() |
148 |
mrapi().StartupInstance.return_value = 1
|
149 |
mrapi().ShutdownInstance.return_value = 2
|
150 |
mrapi().RebootInstance.return_value = 2
|
151 |
with mocked_quotaholder() as m: |
152 |
vm.task = None
|
153 |
vm.operstate = "STOPPED"
|
154 |
servers.start(vm) |
155 |
self.assertEqual(vm.task, "START") |
156 |
self.assertEqual(vm.task_job_id, 1) |
157 |
with mocked_quotaholder() as m: |
158 |
vm.task = None
|
159 |
vm.operstate = "STARTED"
|
160 |
servers.stop(vm) |
161 |
self.assertEqual(vm.task, "STOP") |
162 |
self.assertEqual(vm.task_job_id, 2) |
163 |
with mocked_quotaholder() as m: |
164 |
vm.task = None
|
165 |
servers.reboot(vm) |
166 |
self.assertEqual(vm.task, "REBOOT") |
167 |
self.assertEqual(vm.task_job_id, 3) |
168 |
|
169 |
|
170 |
|
171 |
## Test Callbacks
|
172 |
|
173 |
|
174 |
@patch('synnefo.lib.amqp.AMQPClient')
class UpdateDBTest(TestCase):
    """Tests for the update_db callback handling ganeti-op-status messages."""

    def create_msg(self, **kwargs):
        """Create snf-ganeti-eventd message."""
        msg = {'event_time': split_time(time())}
        msg['type'] = 'ganeti-op-status'
        msg['status'] = 'success'
        msg['jobId'] = 1
        msg['logmsg'] = 'Dummy Log'
        for key, val in kwargs.items():
            msg[key] = val
        return {'body': json.dumps(msg)}

    def test_missing_attribute(self, client):
        update_db(client, json.dumps({'body': {}}))
        self.assertTrue(client.basic_reject.called)

    def test_unhandled_exception(self, client):
        update_db(client, {})
        client.basic_reject.assert_called_once()

    def test_missing_instance(self, client):
        msg = self.create_msg(operation='OP_INSTANCE_STARTUP',
                              instance='foo')
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)

    def test_wrong_type(self, client):
        msg = self.create_msg(type="WRONG_TYPE")
        update_db(client, msg)
        self.assertTrue(client.basic_nack.called)

    def test_old_msg(self, client):
        # A message older than the VM's backendtime must be acked but
        # must not modify the VM.
        from time import sleep
        old_time = time()
        sleep(0.01)
        new_time = datetime.fromtimestamp(time())
        vm = mfactory.VirtualMachineFactory(backendtime=new_time)
        vm.operstate = 'STOPPED'
        vm.save()
        msg = self.create_msg(operation='OP_INSTANCE_STARTUP',
                              event_time=split_time(old_time),
                              instance=vm.backend_vm_id)
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, "STOPPED")
        self.assertEqual(db_vm.backendtime, new_time)

    def test_start(self, client):
        vm = mfactory.VirtualMachineFactory()
        msg = self.create_msg(operation='OP_INSTANCE_STARTUP',
                              instance=vm.backend_vm_id)
        with mocked_quotaholder():
            update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'STARTED')

    def test_stop(self, client):
        vm = mfactory.VirtualMachineFactory()
        msg = self.create_msg(operation='OP_INSTANCE_SHUTDOWN',
                              instance=vm.backend_vm_id)
        with mocked_quotaholder():
            update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'STOPPED')

    def test_reboot(self, client):
        vm = mfactory.VirtualMachineFactory()
        msg = self.create_msg(operation='OP_INSTANCE_REBOOT',
                              instance=vm.backend_vm_id)
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'STARTED')

    def test_remove(self, client):
        vm = mfactory.VirtualMachineFactory()
        # Also create a NIC
        nic = mfactory.NetworkInterfaceFactory(machine=vm)
        nic.network.get_pool().reserve(nic.ipv4)
        msg = self.create_msg(operation='OP_INSTANCE_REMOVE',
                              instance=vm.backend_vm_id)
        with mocked_quotaholder():
            update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'DESTROYED')
        self.assertTrue(db_vm.deleted)
        # Check that nics are deleted
        self.assertFalse(db_vm.nics.all())
        self.assertTrue(nic.network.get_pool().is_available(nic.ipv4))
        # Second part: removal of a VM that holds floating IPs.
        vm2 = mfactory.VirtualMachineFactory()
        network = mfactory.NetworkFactory()
        fp1 = mfactory.FloatingIPFactory(machine=vm2, network=network)
        fp2 = mfactory.FloatingIPFactory(machine=vm2, network=network)
        mfactory.NetworkInterfaceFactory(machine=vm2, network=network,
                                         ipv4=fp1.ipv4)
        mfactory.NetworkInterfaceFactory(machine=vm2, network=network,
                                         ipv4=fp2.ipv4)
        pool = network.get_pool()
        pool.reserve(fp1.ipv4)
        pool.reserve(fp2.ipv4)
        pool.save()
        msg = self.create_msg(operation='OP_INSTANCE_REMOVE',
                              instance=vm2.backend_vm_id)
        with mocked_quotaholder():
            update_db(client, msg)
        client.basic_ack.assert_called_once()
        # FIX: the original fetched vm (already verified above) instead of
        # vm2, so the second removal was never actually checked.
        db_vm = VirtualMachine.objects.get(id=vm2.id)
        self.assertEqual(db_vm.operstate, 'DESTROYED')
        self.assertTrue(db_vm.deleted)
        self.assertEqual(FloatingIP.objects.get(id=fp1.id).machine, None)
        self.assertEqual(FloatingIP.objects.get(id=fp2.id).machine, None)
        pool = network.get_pool()
        # Test that floating ips are not released
        self.assertFalse(pool.is_available(fp1.ipv4))
        self.assertFalse(pool.is_available(fp2.ipv4))

    def test_create(self, client):
        vm = mfactory.VirtualMachineFactory()
        msg = self.create_msg(operation='OP_INSTANCE_CREATE',
                              instance=vm.backend_vm_id)
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'STARTED')

    def test_create_error(self, client):
        """Test that error create sets vm to ERROR state."""
        vm = mfactory.VirtualMachineFactory()
        msg = self.create_msg(operation='OP_INSTANCE_CREATE',
                              instance=vm.backend_vm_id,
                              status='error')
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'ERROR')

    def test_remove_from_error(self, client):
        """Test that remove deletes error builds."""
        vm = mfactory.VirtualMachineFactory(operstate='ERROR')
        # Also create a NIC
        mfactory.NetworkInterfaceFactory(machine=vm)
        msg = self.create_msg(operation='OP_INSTANCE_REMOVE',
                              instance=vm.backend_vm_id)
        with mocked_quotaholder():
            update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'DESTROYED')
        self.assertTrue(db_vm.deleted)
        # Check that nics are deleted
        self.assertFalse(db_vm.nics.all())

    def test_other_error(self, client):
        """Test that other error messages do not affect the VM."""
        vm = mfactory.VirtualMachineFactory()
        msg = self.create_msg(operation='OP_INSTANCE_STARTUP',
                              instance=vm.backend_vm_id,
                              status='error')
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, vm.operstate)
        self.assertEqual(db_vm.backendtime, vm.backendtime)

    def test_resize_msg(self, client):
        vm = mfactory.VirtualMachineFactory()
        # Test empty beparams
        for status in ["success", "error"]:
            msg = self.create_msg(operation='OP_INSTANCE_SET_PARAMS',
                                  instance=vm.backend_vm_id,
                                  beparams={},
                                  status=status)
            client.reset_mock()
            with mocked_quotaholder():
                update_db(client, msg)
            self.assertTrue(client.basic_ack.called)
            db_vm = VirtualMachine.objects.get(id=vm.id)
            self.assertEqual(db_vm.operstate, vm.operstate)
        # Test intermediate states
        for status in ["queued", "waiting", "running"]:
            msg = self.create_msg(operation='OP_INSTANCE_SET_PARAMS',
                                  instance=vm.backend_vm_id,
                                  beparams={"vcpus": 4, "minmem": 2048,
                                            "maxmem": 2048},
                                  status=status)
            client.reset_mock()
            update_db(client, msg)
            self.assertTrue(client.basic_ack.called)
            db_vm = VirtualMachine.objects.get(id=vm.id)
            self.assertEqual(db_vm.operstate, "STOPPED")
        # Test operstate after error
        msg = self.create_msg(operation='OP_INSTANCE_SET_PARAMS',
                              instance=vm.backend_vm_id,
                              beparams={"vcpus": 4},
                              status="error")
        client.reset_mock()
        with mocked_quotaholder():
            update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, "STOPPED")
        # Test success: the VM's flavor must be switched to the one
        # matching the new beparams.
        f1 = mfactory.FlavorFactory(cpu=4, ram=1024, disk_template="drbd",
                                    disk=1024)
        vm.flavor = f1
        vm.save()
        f2 = mfactory.FlavorFactory(cpu=8, ram=2048, disk_template="drbd",
                                    disk=1024)
        msg = self.create_msg(operation='OP_INSTANCE_SET_PARAMS',
                              instance=vm.backend_vm_id,
                              beparams={"vcpus": 8, "minmem": 2048,
                                        "maxmem": 2048},
                              status="success")
        client.reset_mock()
        with mocked_quotaholder():
            update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, "STOPPED")
        self.assertEqual(db_vm.flavor, f2)
        # beparams matching no existing flavor must reject the message.
        msg = self.create_msg(operation='OP_INSTANCE_SET_PARAMS',
                              instance=vm.backend_vm_id,
                              beparams={"vcpus": 100, "minmem": 2048,
                                        "maxmem": 2048},
                              status="success")
        client.reset_mock()
        with mocked_quotaholder():
            update_db(client, msg)
        self.assertTrue(client.basic_reject.called)
410 |
|
411 |
|
412 |
@patch('synnefo.lib.amqp.AMQPClient')
class UpdateNetTest(TestCase):
    """Tests for NIC updates delivered via ganeti-op-status messages."""

    def create_msg(self, **kwargs):
        """Create snf-ganeti-hook message."""
        msg = {
            'event_time': split_time(time()),
            'type': 'ganeti-op-status',
            'operation': 'OP_INSTANCE_SET_PARAMS',
            'status': 'success',
            'jobId': 1,
            'logmsg': 'Dummy Log',
        }
        msg.update(kwargs)
        return {'body': json.dumps(msg)}

    def test_missing_attribute(self, client):
        update_db(client, json.dumps({'body': {}}))
        self.assertTrue(client.basic_reject.called)

    def test_unhandled_exception(self, client):
        update_db(client, {})
        client.basic_reject.assert_called_once()

    def test_wrong_type(self, client):
        update_db(client, self.create_msg(type="WRONG_TYPE"))
        self.assertTrue(client.basic_nack.called)

    def test_missing_instance(self, client):
        update_db(client, self.create_msg(operation='OP_INSTANCE_STARTUP',
                                          instance='foo'))
        self.assertTrue(client.basic_ack.called)

    def test_no_nics(self, client):
        # A message with an empty nics list wipes all NICs of the VM.
        vm = mfactory.VirtualMachineFactory(operstate='ERROR')
        for _ in range(3):
            mfactory.NetworkInterfaceFactory(machine=vm)
        self.assertEqual(len(vm.nics.all()), 3)
        update_db(client, self.create_msg(nics=[],
                                          instance=vm.backend_vm_id))
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(len(db_vm.nics.all()), 0)

    def test_empty_nic(self, client):
        # A NIC with no address info gets empty fields; public networks
        # additionally get the default firewall profile.
        vm = mfactory.VirtualMachineFactory(operstate='ERROR')
        for public in [True, False]:
            net = mfactory.NetworkFactory(public=public)
            update_db(client,
                      self.create_msg(nics=[{'network': net.backend_id}],
                                      instance=vm.backend_vm_id))
            self.assertTrue(client.basic_ack.called)
            db_vm = VirtualMachine.objects.get(id=vm.id)
            nics = db_vm.nics.all()
            self.assertEqual(len(nics), 1)
            self.assertEqual(nics[0].index, 0)
            self.assertEqual(nics[0].ipv4, '')
            self.assertEqual(nics[0].ipv6, '')
            self.assertEqual(nics[0].mac, '')
            if public:
                self.assertEqual(nics[0].firewall_profile,
                                 settings.DEFAULT_FIREWALL_PROFILE)
            else:
                self.assertEqual(nics[0].firewall_profile, '')

    def test_full_nic(self, client):
        # A fully-specified NIC stores its IP/MAC and reserves the IP in
        # the network's address pool.
        vm = mfactory.VirtualMachineFactory(operstate='ERROR')
        net = mfactory.NetworkFactory(subnet='10.0.0.0/24')
        pool = net.get_pool()
        self.assertTrue(pool.is_available('10.0.0.22'))
        pool.save()
        update_db(client,
                  self.create_msg(nics=[{'network': net.backend_id,
                                         'ip': '10.0.0.22',
                                         'mac': 'aa:bb:cc:00:11:22'}],
                                  instance=vm.backend_vm_id))
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        nics = db_vm.nics.all()
        self.assertEqual(len(nics), 1)
        self.assertEqual(nics[0].index, 0)
        self.assertEqual(nics[0].ipv4, '10.0.0.22')
        self.assertEqual(nics[0].ipv6, '')
        self.assertEqual(nics[0].mac, 'aa:bb:cc:00:11:22')
        pool = net.get_pool()
        self.assertFalse(pool.is_available('10.0.0.22'))
        pool.save()
502 |
|
503 |
|
504 |
@patch('synnefo.lib.amqp.AMQPClient')
class UpdateNetworkTest(TestCase):
    """Tests for the update_network callback (ganeti-network-status)."""

    def create_msg(self, **kwargs):
        """Create snf-ganeti-eventd message."""
        msg = {'event_time': split_time(time())}
        msg['type'] = 'ganeti-network-status'
        msg['status'] = 'success'
        msg['jobId'] = 1
        msg['logmsg'] = 'Dummy Log'
        for key, val in kwargs.items():
            msg[key] = val
        return {'body': json.dumps(msg)}

    def test_missing_attribute(self, client):
        update_network(client, json.dumps({'body': {}}))
        self.assertTrue(client.basic_reject.called)

    def test_unhandled_exception(self, client):
        update_network(client, {})
        client.basic_reject.assert_called_once()

    def test_wrong_type(self, client):
        msg = self.create_msg(type="WRONG_TYPE")
        update_network(client, msg)
        self.assertTrue(client.basic_nack.called)

    def test_missing_network(self, client):
        msg = self.create_msg(operation='OP_NETWORK_CREATE',
                              network='foo')
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)

    def test_create(self, client):
        back_network = mfactory.BackendNetworkFactory(operstate='PENDING')
        net = back_network.network
        net.state = 'ACTIVE'
        net.save()
        back1 = back_network.backend

        back_network2 = mfactory.BackendNetworkFactory(operstate='PENDING',
                                                       network=net)
        back2 = back_network2.backend
        # Message from first backend network
        msg = self.create_msg(operation='OP_NETWORK_CONNECT',
                              network=net.backend_id,
                              cluster=back1.clustername)
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)

        back_net = BackendNetwork.objects.get(id=back_network.id)
        self.assertEqual(back_net.operstate, 'ACTIVE')
        db_net = Network.objects.get(id=net.id)
        self.assertEqual(db_net.state, 'ACTIVE')
        # Message from second backend network
        msg = self.create_msg(operation='OP_NETWORK_CONNECT',
                              network=net.backend_id,
                              cluster=back2.clustername)
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)

        db_net = Network.objects.get(id=net.id)
        self.assertEqual(db_net.state, 'ACTIVE')
        back_net = BackendNetwork.objects.get(id=back_network.id)
        self.assertEqual(back_net.operstate, 'ACTIVE')

    def test_create_offline_backend(self, client):
        """Test network creation when a backend is offline."""
        net = mfactory.NetworkFactory(state='ACTIVE')
        bn1 = mfactory.BackendNetworkFactory(network=net)
        # Second backend network on an offline backend (binding unused).
        mfactory.BackendNetworkFactory(network=net,
                                       backend__offline=True)
        msg = self.create_msg(operation='OP_NETWORK_CONNECT',
                              network=net.backend_id,
                              cluster=bn1.backend.clustername)
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)
        new_net = Network.objects.get(id=net.id)
        self.assertEqual(new_net.state, 'ACTIVE')

    def test_disconnect(self, client):
        bn1 = mfactory.BackendNetworkFactory(operstate='ACTIVE')
        net1 = bn1.network
        # FIX: the original assigned net1.state = 'ACTIVE' twice in a row;
        # the duplicate assignment has been removed.
        net1.state = 'ACTIVE'
        net1.save()
        bn2 = mfactory.BackendNetworkFactory(operstate='ACTIVE',
                                             network=net1)
        msg = self.create_msg(operation='OP_NETWORK_DISCONNECT',
                              network=net1.backend_id,
                              cluster=bn2.backend.clustername)
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)
        self.assertEqual(Network.objects.get(id=net1.id).state, 'ACTIVE')
        self.assertEqual(BackendNetwork.objects.get(id=bn2.id).operstate,
                         'PENDING')

    def test_remove(self, client):
        mfactory.MacPrefixPoolTableFactory()
        mfactory.BridgePoolTableFactory()
        bn = mfactory.BackendNetworkFactory(operstate='ACTIVE')
        for old_state in ['success', 'canceled', 'error']:
            for flavor in Network.FLAVORS.keys():
                bn.operstate = old_state
                bn.save()
                net = bn.network
                net.state = 'ACTIVE'
                net.flavor = flavor
                if flavor == 'PHYSICAL_VLAN':
                    net.link = allocate_resource('bridge')
                if flavor == 'MAC_FILTERED':
                    net.mac_prefix = allocate_resource('mac_prefix')
                net.save()
                msg = self.create_msg(operation='OP_NETWORK_REMOVE',
                                      network=net.backend_id,
                                      cluster=bn.backend.clustername)
                with mocked_quotaholder():
                    update_network(client, msg)
                self.assertTrue(client.basic_ack.called)
                db_bnet = BackendNetwork.objects.get(id=bn.id)
                self.assertEqual(db_bnet.operstate, 'DELETED')
                db_net = Network.objects.get(id=net.id)
                self.assertEqual(db_net.state, 'DELETED', flavor)
                self.assertTrue(db_net.deleted)
                # Allocated resources must be released back to the pools.
                if flavor == 'PHYSICAL_VLAN':
                    pool = BridgePoolTable.get_pool()
                    self.assertTrue(pool.is_available(net.link))
                if flavor == 'MAC_FILTERED':
                    pool = MacPrefixPoolTable.get_pool()
                    self.assertTrue(pool.is_available(net.mac_prefix))

    def test_remove_offline_backend(self, client):
        """Test network removing when a backend is offline."""
        mfactory.BridgePoolTableFactory()
        net = mfactory.NetworkFactory(flavor='PHYSICAL_VLAN',
                                      state='ACTIVE',
                                      link='prv12')
        bn1 = mfactory.BackendNetworkFactory(network=net)
        mfactory.BackendNetworkFactory(network=net,
                                       operstate="ACTIVE",
                                       backend__offline=True)
        msg = self.create_msg(operation='OP_NETWORK_REMOVE',
                              network=net.backend_id,
                              cluster=bn1.backend.clustername)
        with mocked_quotaholder():
            update_network(client, msg)
        self.assertTrue(client.basic_ack.called)
        new_net = Network.objects.get(id=net.id)
        self.assertEqual(new_net.state, 'ACTIVE')
        self.assertFalse(new_net.deleted)

    def test_error_opcode(self, client):
        # Error status for non-create/remove opcodes must not change the
        # network or backend-network state.
        mfactory.MacPrefixPoolTableFactory()
        mfactory.BridgePoolTableFactory()
        for state, _ in Network.OPER_STATES:
            bn = mfactory.BackendNetworkFactory(operstate="ACTIVE")
            bn.operstate = state
            bn.save()
            network = bn.network
            network.state = state
            network.save()
            for opcode, _ in BackendNetwork.BACKEND_OPCODES:
                if opcode in ['OP_NETWORK_REMOVE', 'OP_NETWORK_ADD']:
                    continue
                msg = self.create_msg(operation=opcode,
                                      network=bn.network.backend_id,
                                      status='error',
                                      add_reserved_ips=[],
                                      remove_reserved_ips=[],
                                      cluster=bn.backend.clustername)
                with mocked_quotaholder():
                    update_network(client, msg)
                self.assertTrue(client.basic_ack.called)
                db_bnet = BackendNetwork.objects.get(id=bn.id)
                self.assertEqual(bn.operstate, db_bnet.operstate)
                self.assertEqual(bn.network.state, db_bnet.network.state)

    def test_ips(self, client):
        network = mfactory.NetworkFactory(subnet='10.0.0.0/24')
        bn = mfactory.BackendNetworkFactory(network=network)
        msg = self.create_msg(operation='OP_NETWORK_SET_PARAMS',
                              network=network.backend_id,
                              cluster=bn.backend.clustername,
                              status='success',
                              add_reserved_ips=['10.0.0.10', '10.0.0.20'],
                              remove_reserved_ips=[])
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)
        pool = network.get_pool()
        self.assertTrue(pool.is_reserved('10.0.0.10'))
        self.assertTrue(pool.is_reserved('10.0.0.20'))
        pool.save()
        # Release them
        msg = self.create_msg(operation='OP_NETWORK_SET_PARAMS',
                              network=network.backend_id,
                              cluster=bn.backend.clustername,
                              add_reserved_ips=[],
                              remove_reserved_ips=['10.0.0.10', '10.0.0.20'])
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)
        pool = network.get_pool()
        self.assertFalse(pool.is_reserved('10.0.0.10'))
        self.assertFalse(pool.is_reserved('10.0.0.20'))
708 |
|
709 |
|
710 |
@patch('synnefo.lib.amqp.AMQPClient')
class UpdateBuildProgressTest(TestCase):
    """Tests for the image-copy-progress callback."""

    def setUp(self):
        self.vm = mfactory.VirtualMachineFactory()

    def get_db_vm(self):
        """Re-fetch the test VM from the database."""
        return VirtualMachine.objects.get(id=self.vm.id)

    def create_msg(self, **kwargs):
        """Create snf-progress-monitor message."""
        msg = {
            'event_time': split_time(time()),
            'type': 'image-copy-progress',
            'progress': 0,
        }
        msg.update(kwargs)
        return {'body': json.dumps(msg)}

    def test_missing_attribute(self, client):
        update_build_progress(client, json.dumps({'body': {}}))
        self.assertTrue(client.basic_reject.called)

    def test_unhandled_exception(self, client):
        update_build_progress(client, {})
        client.basic_reject.assert_called_once()

    def test_missing_instance(self, client):
        update_build_progress(client, self.create_msg(instance='foo'))
        self.assertTrue(client.basic_ack.called)

    def test_wrong_type(self, client):
        update_build_progress(client, self.create_msg(type="WRONG_TYPE"))
        self.assertTrue(client.basic_nack.called)

    def test_progress_update(self, client):
        # A valid progress value is persisted on the VM.
        rprogress = randint(10, 100)
        msg = self.create_msg(progress=rprogress,
                              instance=self.vm.backend_vm_id)
        update_build_progress(client, msg)
        self.assertTrue(client.basic_ack.called)
        self.assertEqual(self.get_db_vm().buildpercentage, rprogress)

    def test_invalid_value(self, client):
        # Invalid progress values must leave buildpercentage untouched.
        old = self.vm.buildpercentage
        for rprogress in [0, -1, 'a']:
            msg = self.create_msg(progress=rprogress,
                                  instance=self.vm.backend_vm_id)
            update_build_progress(client, msg)
            self.assertTrue(client.basic_ack.called)
            self.assertEqual(self.get_db_vm().buildpercentage, old)
|
764 |
|
765 |
|
766 |
from synnefo.logic.reconciliation import VMState |
767 |
class ReconciliationTest(TestCase):
    """Tests for the helpers in synnefo.logic.reconciliation."""

    def get_vm(self, operstate, deleted=False):
        """Create a VM with a fixed 2-CPU/1024-RAM flavor and operstate."""
        flavor = mfactory.FlavorFactory(cpu=2, ram=1024)
        vm = mfactory.VirtualMachineFactory(deleted=deleted, flavor=flavor)
        vm.operstate = operstate
        vm.save()
        return vm

    def test_get_servers_from_db(self):
        """Test getting a dictionary from each server to its operstate."""
        vm1 = self.get_vm('STARTED')
        self.get_vm('DESTROYED', deleted=True)  # must be excluded
        vm3 = self.get_vm('STOPPED')
        expected = {
            vm1.id: VMState(state='STARTED', cpu=2, ram=1024, nics=[]),
            vm3.id: VMState(state='STOPPED', cpu=2, ram=1024, nics=[]),
        }
        self.assertEqual(reconciliation.get_servers_from_db(), expected)

    def test_stale_servers_in_db(self):
        """Test discovery of stale entries in DB."""
        D = {1: None, 2: 'None', 3: None, 30000: 'BUILD', 30002: 'None'}
        G = {1: True, 3: True, 30000: True}
        self.assertEqual(reconciliation.stale_servers_in_db(D, G),
                         set([2, 30002]))

    @patch("synnefo.db.models.get_rapi_client")
    def test_stale_building_vm(self, client):
        vm = mfactory.VirtualMachineFactory()
        vm.state = 'BUILD'
        vm.backendjobid = 42
        vm.save()
        D = {vm.id: 'BUILD'}
        G = {}
        # While the build job is still in flight the VM is not stale.
        for status in ['queued', 'waiting', 'running']:
            client.return_value.GetJobStatus.return_value = {'status': status}
            self.assertEqual(reconciliation.stale_servers_in_db(D, G),
                             set([]))
            client.return_value.GetJobStatus\
                  .assert_called_once_with(vm.backendjobid)
            client.reset_mock()
        # A finished job triggers an instance lookup before deciding.
        for status in ['success', 'error', 'canceled']:
            client.return_value.GetJobStatus.return_value = {'status': status}
            self.assertEqual(reconciliation.stale_servers_in_db(D, G),
                             set([]))
            client.return_value.GetInstance\
                  .assert_called_once_with(vm.backend_vm_id)
            client.return_value.GetJobStatus\
                  .assert_called_once_with(vm.backendjobid)
            client.reset_mock()
        # A RAPI error marks the server as stale.
        from synnefo.logic.rapi import GanetiApiError
        client.return_value.GetJobStatus.side_effect = GanetiApiError('Foo')
        self.assertEqual(reconciliation.stale_servers_in_db(D, G),
                         set([vm.id]))

    def test_orphan_instances_in_ganeti(self):
        """Test discovery of orphan instances in Ganeti, without a DB entry."""
        G = {1: True, 2: False, 3: False, 4: True, 50: True}
        D = {1: True, 3: False}
        self.assertEqual(reconciliation.orphan_instances_in_ganeti(D, G),
                         set([2, 4, 50]))

    def test_unsynced_operstate(self):
        """Test discovery of unsynced operstate between the DB and Ganeti."""
        mkstate = lambda state: VMState(state=state, cpu=1, ram=1024, nics=[])
        vm1 = self.get_vm("STARTED")
        vm2 = self.get_vm("STARTED")
        vm3 = self.get_vm("BUILD")
        vm4 = self.get_vm("STARTED")
        vm5 = self.get_vm("BUILD")

        D = {1: mkstate("STARTED"), 2: mkstate("STARTED"), 3: mkstate("BUILD"),
             4: mkstate("STARTED"), 50: mkstate("BUILD")}
        # NOTE(review): vm4.id appears twice below (the second entry wins)
        # and vm3.id is missing -- the first duplicate key was probably
        # meant to be vm3.id.  Kept as-is to preserve current behavior;
        # confirm before changing.
        G = {vm1.id: mkstate(True), vm2.id: mkstate(False),
             vm4.id: mkstate(True), vm4.id: mkstate(False),
             vm5.id: mkstate(False)}
        self.assertEqual(reconciliation.unsynced_operstate(D, G),
                         set([(vm2.id, "STARTED", False),
                              (vm4.id, "STARTED", False)]))
847 |
|
848 |
from synnefo.logic.test.rapi_pool_tests import * |
849 |
from synnefo.logic.test.utils_tests import * |