# vim: set fileencoding=utf-8 :
# Copyright 2012 GRNET S.A. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#   1. Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#
#   2. Redistributions in binary form must reproduce the above copyright
#      notice, this list of conditions and the following disclaimer in the
#      documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of GRNET S.A.

# Provides automated tests for logic module

from random import randint

from django.conf import settings  # used below (firewall profile, backend prefix)
from django.test import TestCase

from snf_django.lib.api import faults
from synnefo.db.models import *
from synnefo.db import models_factory as mfactory
from synnefo.logic import reconciliation, servers
from synnefo.lib.utils import split_time
from datetime import datetime
from mock import patch
from synnefo.api.util import allocate_resource
from synnefo.logic.callbacks import (update_db, update_network,
                                     update_build_progress)
from snf_django.utils.testing import mocked_quotaholder

now = datetime.now
from time import time
import json

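# The test cases below exercise three areas of the logic module: the server
# commands in synnefo.logic.servers, the AMQP callbacks in
# synnefo.logic.callbacks that consume snf-ganeti-eventd messages, and the
# backend reconciliation in synnefo.logic.reconciliation.  The Ganeti RAPI
# client, the AMQP client and the quotaholder are mocked throughout, so no
# external services are required.  Assuming a standard snf-cyclades-app
# checkout, the suite is normally run through Django's test runner (for
# example `snf-manage test logic`); adjust the command to your deployment.
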
@patch("synnefo.logic.rapi_pool.GanetiRapiClient")
class ServerCommandTest(TestCase):
    def test_pending_task(self, mrapi):
        vm = mfactory.VirtualMachineFactory(task="REBOOT", task_job_id=1)
        self.assertRaises(faults.BadRequest, servers.start, vm)
        vm = mfactory.VirtualMachineFactory(task="BUILD", task_job_id=1)
        self.assertRaises(faults.BuildInProgress, servers.start, vm)
        # Destroy always succeeds, even with a pending task
        vm = mfactory.VirtualMachineFactory(task="BUILD", task_job_id=1)
        mrapi().DeleteInstance.return_value = 1
        with mocked_quotaholder():
            servers.destroy(vm)
        vm = mfactory.VirtualMachineFactory(task="REBOOT", task_job_id=1)
        with mocked_quotaholder():
            servers.destroy(vm)

    def test_deleted_vm(self, mrapi):
        vm = mfactory.VirtualMachineFactory(deleted=True)
        self.assertRaises(faults.BadRequest, servers.start, vm)

    def test_invalid_operstate_for_action(self, mrapi):
        vm = mfactory.VirtualMachineFactory(operstate="STARTED")
        self.assertRaises(faults.BadRequest, servers.start, vm)
        vm = mfactory.VirtualMachineFactory(operstate="STOPPED")
        self.assertRaises(faults.BadRequest, servers.stop, vm)
        vm = mfactory.VirtualMachineFactory(operstate="STARTED")
        self.assertRaises(faults.BadRequest, servers.resize, vm)
        vm = mfactory.VirtualMachineFactory(operstate="STOPPED")
        self.assertRaises(faults.BadRequest, servers.stop, vm)
        # Test valid actions
        mrapi().StartupInstance.return_value = 1
        with mocked_quotaholder():
            servers.start(vm)
        vm.task = None
        vm.task_job_id = None
        vm.save()
        mrapi().RebootInstance.return_value = 1
        with mocked_quotaholder():
            servers.reboot(vm, "HARD")

    def test_commission(self, mrapi):
        vm = mfactory.VirtualMachineFactory(operstate="STOPPED")
        # Still pending
        vm.serial = mfactory.QuotaHolderSerialFactory(serial=200,
                                                      resolved=False,
                                                      pending=True)
        serial = vm.serial
        mrapi().StartupInstance.return_value = 1
        with mocked_quotaholder() as m:
            servers.start(vm)
            m.resolve_commissions.assert_called_once_with('', [],
                                                          [serial.serial])
            self.assertTrue(m.issue_one_commission.called)
        # Not pending, reject
        vm.task = None
        vm.serial = mfactory.QuotaHolderSerialFactory(serial=400,
                                                      resolved=False,
                                                      pending=False,
                                                      accept=False)
        serial = vm.serial
        mrapi().StartupInstance.return_value = 1
        with mocked_quotaholder() as m:
            servers.start(vm)
            m.resolve_commissions.assert_called_once_with('', [],
                                                          [serial.serial])
            self.assertTrue(m.issue_one_commission.called)
        # Not pending, accept
        vm.task = None
        vm.serial = mfactory.QuotaHolderSerialFactory(serial=600,
                                                      resolved=False,
                                                      pending=False,
                                                      accept=True)
        serial = vm.serial
        mrapi().StartupInstance.return_value = 1
        with mocked_quotaholder() as m:
            servers.start(vm)
            m.resolve_commissions.assert_called_once_with('', [serial.serial],
                                                          [])
            self.assertTrue(m.issue_one_commission.called)

        mrapi().StartupInstance.side_effect = ValueError
        vm.task = None
        vm.serial = None
        # Test reject on Ganeti error
        with mocked_quotaholder() as m:
            try:
                servers.start(vm)
            except:
                m.resolve_commissions\
                    .assert_called_once_with('', [], [vm.serial.serial])

    def test_task_after(self, mrapi):
        return  # test currently disabled
        vm = mfactory.VirtualMachineFactory()
        mrapi().StartupInstance.return_value = 1
        mrapi().ShutdownInstance.return_value = 2
        mrapi().RebootInstance.return_value = 2
        with mocked_quotaholder():
            vm.task = None
            vm.operstate = "STOPPED"
            servers.start(vm)
            self.assertEqual(vm.task, "START")
            self.assertEqual(vm.task_job_id, 1)
        with mocked_quotaholder():
            vm.task = None
            vm.operstate = "STARTED"
            servers.stop(vm)
            self.assertEqual(vm.task, "STOP")
            self.assertEqual(vm.task_job_id, 2)
        with mocked_quotaholder():
            vm.task = None
            servers.reboot(vm)
            self.assertEqual(vm.task, "REBOOT")
            self.assertEqual(vm.task_job_id, 3)


## Test Callbacks
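#
# The callbacks under test receive AMQP messages whose 'body' is a
# JSON-encoded dict.  The create_msg() helpers below build such messages;
# roughly (a sketch, values are illustrative):
#
#     {"event_time": split_time(time()),
#      "type": "ganeti-op-status",          # or ganeti-network-status,
#                                           #    image-copy-progress
#      "operation": "OP_INSTANCE_STARTUP",  # Ganeti opcode
#      "status": "success",                 # queued/waiting/running/success/error
#      "jobId": 1,
#      "logmsg": "Dummy Log",
#      "instance": vm.backend_vm_id}        # or "network"/"cluster" for
#                                           #    network events
#
# Each test then asserts that the message was ack'ed, nack'ed or rejected and
# that the database state was updated accordingly.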
@patch('synnefo.lib.amqp.AMQPClient')
class UpdateDBTest(TestCase):
    def create_msg(self, **kwargs):
        """Create snf-ganeti-eventd message"""
        msg = {'event_time': split_time(time())}
        msg['type'] = 'ganeti-op-status'
        msg['status'] = 'success'
        msg['jobId'] = 1
        msg['logmsg'] = 'Dummy Log'
        for key, val in kwargs.items():
            msg[key] = val
        message = {'body': json.dumps(msg)}
        return message

    def test_missing_attribute(self, client):
        update_db(client, json.dumps({'body': {}}))
        self.assertTrue(client.basic_reject.called)

    def test_unhandled_exception(self, client):
        update_db(client, {})
        client.basic_reject.assert_called_once()

    def test_missing_instance(self, client):
        msg = self.create_msg(operation='OP_INSTANCE_STARTUP',
                              instance='foo')
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)

    def test_wrong_type(self, client):
        msg = self.create_msg(type="WRONG_TYPE")
        update_db(client, msg)
        self.assertTrue(client.basic_nack.called)

    def test_old_msg(self, client):
        from time import sleep
        from datetime import datetime
        old_time = time()
        sleep(0.01)
        new_time = datetime.fromtimestamp(time())
        vm = mfactory.VirtualMachineFactory(backendtime=new_time)
        vm.operstate = 'STOPPED'
        vm.save()
        msg = self.create_msg(operation='OP_INSTANCE_STARTUP',
                              event_time=split_time(old_time),
                              instance=vm.backend_vm_id)
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEquals(db_vm.operstate, "STOPPED")
        self.assertEquals(db_vm.backendtime, new_time)

    def test_start(self, client):
        vm = mfactory.VirtualMachineFactory()
        msg = self.create_msg(operation='OP_INSTANCE_STARTUP',
                              instance=vm.backend_vm_id)
        with mocked_quotaholder():
            update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'STARTED')

    def test_stop(self, client):
        vm = mfactory.VirtualMachineFactory()
        msg = self.create_msg(operation='OP_INSTANCE_SHUTDOWN',
                              instance=vm.backend_vm_id)
        with mocked_quotaholder():
            update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'STOPPED')

    def test_reboot(self, client):
        vm = mfactory.VirtualMachineFactory()
        msg = self.create_msg(operation='OP_INSTANCE_REBOOT',
                              instance=vm.backend_vm_id)
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'STARTED')

    def test_remove(self, client):
        vm = mfactory.VirtualMachineFactory()
        # Also create a NIC
        nic = mfactory.NetworkInterfaceFactory(machine=vm)
        nic.network.get_pool().reserve(nic.ipv4)
        msg = self.create_msg(operation='OP_INSTANCE_REMOVE',
                              instance=vm.backend_vm_id)
        with mocked_quotaholder():
            update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'DESTROYED')
        self.assertTrue(db_vm.deleted)
        # Check that the NICs are deleted
        self.assertFalse(db_vm.nics.all())
        self.assertTrue(nic.network.get_pool().is_available(nic.ipv4))
        vm2 = mfactory.VirtualMachineFactory()
        network = mfactory.NetworkFactory(floating_ip_pool=True)
        fp1 = mfactory.FloatingIPFactory(machine=vm2, network=network)
        fp2 = mfactory.FloatingIPFactory(machine=vm2, network=network)
        mfactory.NetworkInterfaceFactory(machine=vm2, network=network,
                                         ipv4=fp1.ipv4)
        mfactory.NetworkInterfaceFactory(machine=vm2, network=network,
                                         ipv4=fp2.ipv4)
        pool = network.get_pool()
        pool.reserve(fp1.ipv4)
        pool.reserve(fp2.ipv4)
        pool.save()
        msg = self.create_msg(operation='OP_INSTANCE_REMOVE',
                              instance=vm2.backend_vm_id)
        with mocked_quotaholder():
            update_db(client, msg)
        client.basic_ack.assert_called_once()
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'DESTROYED')
        self.assertTrue(db_vm.deleted)
        self.assertEqual(FloatingIP.objects.get(id=fp1.id).machine, None)
        self.assertEqual(FloatingIP.objects.get(id=fp2.id).machine, None)
        pool = network.get_pool()
        # Test that the floating IPs are not released
        self.assertFalse(pool.is_available(fp1.ipv4))
        self.assertFalse(pool.is_available(fp2.ipv4))

    def test_create(self, client):
        vm = mfactory.VirtualMachineFactory()
        msg = self.create_msg(operation='OP_INSTANCE_CREATE',
                              instance=vm.backend_vm_id)
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'STARTED')

    def test_create_error(self, client):
        """Test that a failed create sets the VM to ERROR state"""
        vm = mfactory.VirtualMachineFactory()
        msg = self.create_msg(operation='OP_INSTANCE_CREATE',
                              instance=vm.backend_vm_id,
                              status='error')
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'ERROR')

    def test_remove_from_error(self, client):
        """Test that remove deletes a VM that failed to build (ERROR state)"""
        vm = mfactory.VirtualMachineFactory(operstate='ERROR')
        # Also create a NIC
        mfactory.NetworkInterfaceFactory(machine=vm)
        msg = self.create_msg(operation='OP_INSTANCE_REMOVE',
                              instance=vm.backend_vm_id)
        with mocked_quotaholder():
            update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, 'DESTROYED')
        self.assertTrue(db_vm.deleted)
        # Check that the NICs are deleted
        self.assertFalse(db_vm.nics.all())

    def test_other_error(self, client):
        """Test that other error messages do not affect the VM"""
        vm = mfactory.VirtualMachineFactory()
        msg = self.create_msg(operation='OP_INSTANCE_STARTUP',
                              instance=vm.backend_vm_id,
                              status='error')
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, vm.operstate)
        self.assertEqual(db_vm.backendtime, vm.backendtime)

    def test_resize_msg(self, client):
        vm = mfactory.VirtualMachineFactory()
        # Test empty beparams
        for status in ["success", "error"]:
            msg = self.create_msg(operation='OP_INSTANCE_SET_PARAMS',
                                  instance=vm.backend_vm_id,
                                  beparams={},
                                  status=status)
            client.reset_mock()
            with mocked_quotaholder():
                update_db(client, msg)
            self.assertTrue(client.basic_ack.called)
            db_vm = VirtualMachine.objects.get(id=vm.id)
            self.assertEqual(db_vm.operstate, vm.operstate)
        # Test intermediate states
        vm.operstate = "STOPPED"
        vm.save()
        for status in ["queued", "waiting", "running"]:
            msg = self.create_msg(operation='OP_INSTANCE_SET_PARAMS',
                                  instance=vm.backend_vm_id,
                                  beparams={"vcpus": 4, "minmem": 2048,
                                            "maxmem": 2048},
                                  status=status)
            client.reset_mock()
            update_db(client, msg)
            self.assertTrue(client.basic_ack.called)
            db_vm = VirtualMachine.objects.get(id=vm.id)
            self.assertEqual(db_vm.operstate, "STOPPED")
        # Test operstate after error
        msg = self.create_msg(operation='OP_INSTANCE_SET_PARAMS',
                              instance=vm.backend_vm_id,
                              beparams={"vcpus": 4},
                              status="error")
        client.reset_mock()
        with mocked_quotaholder():
            update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, "STOPPED")
        # Test success
        f1 = mfactory.FlavorFactory(cpu=4, ram=1024, disk_template="drbd",
                                    disk=1024)
        vm.flavor = f1
        vm.save()
        f2 = mfactory.FlavorFactory(cpu=8, ram=2048, disk_template="drbd",
                                    disk=1024)
        msg = self.create_msg(operation='OP_INSTANCE_SET_PARAMS',
                              instance=vm.backend_vm_id,
                              beparams={"vcpus": 8, "minmem": 2048,
                                        "maxmem": 2048},
                              status="success")
        client.reset_mock()
        with mocked_quotaholder():
            update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(db_vm.operstate, "STOPPED")
        self.assertEqual(db_vm.flavor, f2)
        msg = self.create_msg(operation='OP_INSTANCE_SET_PARAMS',
                              instance=vm.backend_vm_id,
                              beparams={"vcpus": 100, "minmem": 2048,
                                        "maxmem": 2048},
                              status="success")
        client.reset_mock()
        with mocked_quotaholder():
            update_db(client, msg)
        self.assertTrue(client.basic_reject.called)


@patch('synnefo.lib.amqp.AMQPClient')
class UpdateNetTest(TestCase):
    def create_msg(self, **kwargs):
        """Create snf-ganeti-hook message"""
        msg = {'event_time': split_time(time())}
        msg['type'] = 'ganeti-op-status'
        msg['operation'] = 'OP_INSTANCE_SET_PARAMS'
        msg['status'] = 'success'
        msg['jobId'] = 1
        msg['logmsg'] = 'Dummy Log'
        for key, val in kwargs.items():
            msg[key] = val
        message = {'body': json.dumps(msg)}
        return message

    def test_missing_attribute(self, client):
        update_db(client, json.dumps({'body': {}}))
        self.assertTrue(client.basic_reject.called)

    def test_unhandled_exception(self, client):
        update_db(client, {})
        client.basic_reject.assert_called_once()

    def test_wrong_type(self, client):
        msg = self.create_msg(type="WRONG_TYPE")
        update_db(client, msg)
        self.assertTrue(client.basic_nack.called)

    def test_missing_instance(self, client):
        msg = self.create_msg(operation='OP_INSTANCE_STARTUP',
                              instance='foo')
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)

    def test_no_nics(self, client):
        vm = mfactory.VirtualMachineFactory(operstate='ERROR')
        mfactory.NetworkInterfaceFactory(machine=vm)
        mfactory.NetworkInterfaceFactory(machine=vm)
        mfactory.NetworkInterfaceFactory(machine=vm)
        self.assertEqual(len(vm.nics.all()), 3)
        msg = self.create_msg(nics=[],
                              instance=vm.backend_vm_id)
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        self.assertEqual(len(db_vm.nics.all()), 0)

    def test_empty_nic(self, client):
        vm = mfactory.VirtualMachineFactory(operstate='ERROR')
        for public in [True, False]:
            net = mfactory.NetworkFactory(public=public, subnet6=None)
            msg = self.create_msg(nics=[{'network': net.backend_id}],
                                  instance=vm.backend_vm_id)
            update_db(client, msg)
            self.assertTrue(client.basic_ack.called)
            db_vm = VirtualMachine.objects.get(id=vm.id)
            nics = db_vm.nics.all()
            self.assertEqual(len(nics), 1)
            self.assertEqual(nics[0].index, 0)
            self.assertEqual(nics[0].ipv4, None)
            self.assertEqual(nics[0].ipv6, None)
            self.assertEqual(nics[0].mac, None)
            if public:
                self.assertEqual(nics[0].firewall_profile,
                                 settings.DEFAULT_FIREWALL_PROFILE)
            else:
                self.assertEqual(nics[0].firewall_profile, None)

    def test_full_nic(self, client):
        vm = mfactory.VirtualMachineFactory(operstate='ERROR')
        net = mfactory.NetworkFactory(subnet='10.0.0.0/24', subnet6=None)
        pool = net.get_pool()
        self.assertTrue(pool.is_available('10.0.0.22'))
        pool.save()
        msg = self.create_msg(nics=[{'network': net.backend_id,
                                     'ip': '10.0.0.22',
                                     'mac': 'aa:bb:cc:00:11:22'}],
                              instance=vm.backend_vm_id)
        update_db(client, msg)
        self.assertTrue(client.basic_ack.called)
        db_vm = VirtualMachine.objects.get(id=vm.id)
        nics = db_vm.nics.all()
        self.assertEqual(len(nics), 1)
        self.assertEqual(nics[0].index, 0)
        self.assertEqual(nics[0].ipv4, '10.0.0.22')
        self.assertEqual(nics[0].ipv6, None)
        self.assertEqual(nics[0].mac, 'aa:bb:cc:00:11:22')
        pool = net.get_pool()
        self.assertFalse(pool.is_available('10.0.0.22'))
        pool.save()


@patch('synnefo.lib.amqp.AMQPClient')
class UpdateNetworkTest(TestCase):
    def create_msg(self, **kwargs):
        """Create snf-ganeti-eventd message"""
        msg = {'event_time': split_time(time())}
        msg['type'] = 'ganeti-network-status'
        msg['status'] = 'success'
        msg['jobId'] = 1
        msg['logmsg'] = 'Dummy Log'
        for key, val in kwargs.items():
            msg[key] = val
        message = {'body': json.dumps(msg)}
        return message

    def test_missing_attribute(self, client):
        update_network(client, json.dumps({'body': {}}))
        self.assertTrue(client.basic_reject.called)

    def test_unhandled_exception(self, client):
        update_network(client, {})
        client.basic_reject.assert_called_once()

    def test_wrong_type(self, client):
        msg = self.create_msg(type="WRONG_TYPE")
        update_network(client, msg)
        self.assertTrue(client.basic_nack.called)

    def test_missing_network(self, client):
        msg = self.create_msg(operation='OP_NETWORK_CREATE',
                              network='foo')
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)

    def test_create(self, client):
        back_network = mfactory.BackendNetworkFactory(operstate='PENDING')
        net = back_network.network
        net.state = 'ACTIVE'
        net.save()
        back1 = back_network.backend

        back_network2 = mfactory.BackendNetworkFactory(operstate='PENDING',
                                                       network=net)
        back2 = back_network2.backend
        # Message from the first backend network
        msg = self.create_msg(operation='OP_NETWORK_CONNECT',
                              network=net.backend_id,
                              cluster=back1.clustername)
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)

        back_net = BackendNetwork.objects.get(id=back_network.id)
        self.assertEqual(back_net.operstate, 'ACTIVE')
        db_net = Network.objects.get(id=net.id)
        self.assertEqual(db_net.state, 'ACTIVE')
        # Message from the second backend network
        msg = self.create_msg(operation='OP_NETWORK_CONNECT',
                              network=net.backend_id,
                              cluster=back2.clustername)
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)

        db_net = Network.objects.get(id=net.id)
        self.assertEqual(db_net.state, 'ACTIVE')
        back_net = BackendNetwork.objects.get(id=back_network.id)
        self.assertEqual(back_net.operstate, 'ACTIVE')

    def test_create_offline_backend(self, client):
        """Test network creation when a backend is offline"""
        net = mfactory.NetworkFactory(state='ACTIVE')
        bn1 = mfactory.BackendNetworkFactory(network=net)
        mfactory.BackendNetworkFactory(network=net,
                                       backend__offline=True)
        msg = self.create_msg(operation='OP_NETWORK_CONNECT',
                              network=net.backend_id,
                              cluster=bn1.backend.clustername)
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)
        new_net = Network.objects.get(id=net.id)
        self.assertEqual(new_net.state, 'ACTIVE')

    def test_disconnect(self, client):
        bn1 = mfactory.BackendNetworkFactory(operstate='ACTIVE')
        net1 = bn1.network
        net1.state = 'ACTIVE'
        net1.save()
        bn2 = mfactory.BackendNetworkFactory(operstate='ACTIVE',
                                             network=net1)
        msg = self.create_msg(operation='OP_NETWORK_DISCONNECT',
                              network=net1.backend_id,
                              cluster=bn2.backend.clustername)
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)
        self.assertEqual(Network.objects.get(id=net1.id).state, 'ACTIVE')
        self.assertEqual(BackendNetwork.objects.get(id=bn2.id).operstate,
                         'PENDING')

    def test_remove(self, client):
        mfactory.MacPrefixPoolTableFactory()
        mfactory.BridgePoolTableFactory()
        bn = mfactory.BackendNetworkFactory(operstate='ACTIVE')
        for old_state in ['success', 'canceled', 'error']:
            for flavor in Network.FLAVORS.keys():
                bn.operstate = old_state
                bn.save()
                net = bn.network
                net.state = 'ACTIVE'
                net.flavor = flavor
                if flavor == 'PHYSICAL_VLAN':
                    net.link = allocate_resource('bridge')
                if flavor == 'MAC_FILTERED':
                    net.mac_prefix = allocate_resource('mac_prefix')
                net.save()
                msg = self.create_msg(operation='OP_NETWORK_REMOVE',
                                      network=net.backend_id,
                                      cluster=bn.backend.clustername)
                with mocked_quotaholder():
                    update_network(client, msg)
                self.assertTrue(client.basic_ack.called)
                db_bnet = BackendNetwork.objects.get(id=bn.id)
                self.assertEqual(db_bnet.operstate,
                                 'DELETED')
                db_net = Network.objects.get(id=net.id)
                self.assertEqual(db_net.state, 'DELETED', flavor)
                self.assertTrue(db_net.deleted)
                if flavor == 'PHYSICAL_VLAN':
                    pool = BridgePoolTable.get_pool()
                    self.assertTrue(pool.is_available(net.link))
                if flavor == 'MAC_FILTERED':
                    pool = MacPrefixPoolTable.get_pool()
                    self.assertTrue(pool.is_available(net.mac_prefix))

    def test_remove_offline_backend(self, client):
        """Test network removal when a backend is offline"""
        mfactory.BridgePoolTableFactory()
        net = mfactory.NetworkFactory(flavor='PHYSICAL_VLAN',
                                      state='ACTIVE',
                                      link='prv12')
        bn1 = mfactory.BackendNetworkFactory(network=net)
        mfactory.BackendNetworkFactory(network=net,
                                       operstate="ACTIVE",
                                       backend__offline=True)
        msg = self.create_msg(operation='OP_NETWORK_REMOVE',
                              network=net.backend_id,
                              cluster=bn1.backend.clustername)
        with mocked_quotaholder():
            update_network(client, msg)
        self.assertTrue(client.basic_ack.called)
        new_net = Network.objects.get(id=net.id)
        self.assertEqual(new_net.state, 'ACTIVE')
        self.assertFalse(new_net.deleted)

    def test_error_opcode(self, client):
        mfactory.MacPrefixPoolTableFactory()
        mfactory.BridgePoolTableFactory()
        for state, _ in Network.OPER_STATES:
            bn = mfactory.BackendNetworkFactory(operstate="ACTIVE")
            bn.operstate = state
            bn.save()
            network = bn.network
            network.state = state
            network.save()
            for opcode, _ in BackendNetwork.BACKEND_OPCODES:
                if opcode in ['OP_NETWORK_REMOVE', 'OP_NETWORK_ADD']:
                    continue
                msg = self.create_msg(operation=opcode,
                                      network=bn.network.backend_id,
                                      status='error',
                                      add_reserved_ips=[],
                                      remove_reserved_ips=[],
                                      cluster=bn.backend.clustername)
                with mocked_quotaholder():
                    update_network(client, msg)
                self.assertTrue(client.basic_ack.called)
                db_bnet = BackendNetwork.objects.get(id=bn.id)
                self.assertEqual(bn.operstate, db_bnet.operstate)
                self.assertEqual(bn.network.state, db_bnet.network.state)

    def test_ips(self, client):
        network = mfactory.NetworkFactory(subnet='10.0.0.0/24')
        bn = mfactory.BackendNetworkFactory(network=network)
        msg = self.create_msg(operation='OP_NETWORK_SET_PARAMS',
                              network=network.backend_id,
                              cluster=bn.backend.clustername,
                              status='success',
                              add_reserved_ips=['10.0.0.10', '10.0.0.20'],
                              remove_reserved_ips=[])
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)
        pool = network.get_pool()
        self.assertTrue(pool.is_reserved('10.0.0.10'))
        self.assertTrue(pool.is_reserved('10.0.0.20'))
        pool.save()
        # Release them
        msg = self.create_msg(operation='OP_NETWORK_SET_PARAMS',
                              network=network.backend_id,
                              cluster=bn.backend.clustername,
                              add_reserved_ips=[],
                              remove_reserved_ips=['10.0.0.10', '10.0.0.20'])
        update_network(client, msg)
        self.assertTrue(client.basic_ack.called)
        pool = network.get_pool()
        self.assertFalse(pool.is_reserved('10.0.0.10'))
        self.assertFalse(pool.is_reserved('10.0.0.20'))

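# Build progress messages (type 'image-copy-progress') are produced by
# snf-progress-monitor while an image is being deployed on a new instance;
# update_build_progress() is expected to store the reported percentage in
# VirtualMachine.buildpercentage, which is what the tests below verify.
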
@patch('synnefo.lib.amqp.AMQPClient')
class UpdateBuildProgressTest(TestCase):
    def setUp(self):
        self.vm = mfactory.VirtualMachineFactory()

    def get_db_vm(self):
        return VirtualMachine.objects.get(id=self.vm.id)

    def create_msg(self, **kwargs):
        """Create snf-progress-monitor message"""
        msg = {'event_time': split_time(time())}
        msg['type'] = 'image-copy-progress'
        msg['progress'] = 0
        for key, val in kwargs.items():
            msg[key] = val
        message = {'body': json.dumps(msg)}
        return message

    def test_missing_attribute(self, client):
        update_build_progress(client, json.dumps({'body': {}}))
        self.assertTrue(client.basic_reject.called)

    def test_unhandled_exception(self, client):
        update_build_progress(client, {})
        client.basic_reject.assert_called_once()

    def test_missing_instance(self, client):
        msg = self.create_msg(instance='foo')
        update_build_progress(client, msg)
        self.assertTrue(client.basic_ack.called)

    def test_wrong_type(self, client):
        msg = self.create_msg(type="WRONG_TYPE")
        update_build_progress(client, msg)
        self.assertTrue(client.basic_nack.called)

    def test_progress_update(self, client):
        rprogress = randint(10, 100)
        msg = self.create_msg(progress=rprogress,
                              instance=self.vm.backend_vm_id)
        update_build_progress(client, msg)
        self.assertTrue(client.basic_ack.called)
        vm = self.get_db_vm()
        self.assertEqual(vm.buildpercentage, rprogress)

    def test_invalid_value(self, client):
        old = self.vm.buildpercentage
        for rprogress in [0, -1, 'a']:
            msg = self.create_msg(progress=rprogress,
                                  instance=self.vm.backend_vm_id)
            update_build_progress(client, msg)
            self.assertTrue(client.basic_ack.called)
            vm = self.get_db_vm()
            self.assertEqual(vm.buildpercentage, old)


import logging
from datetime import timedelta

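# Reconciliation compares the state recorded in the Cyclades DB with what the
# (mocked) Ganeti backend reports through GetInstances()/GetJobStatus() and
# repairs any divergence.  The tests enable every fix_* option in setUp() so
# that the reconciler is allowed to correct building, stale, orphan and
# unsynced servers.
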
@patch("synnefo.logic.rapi_pool.GanetiRapiClient")
class ReconciliationTest(TestCase):
    @patch("synnefo.logic.rapi_pool.GanetiRapiClient")
    def setUp(self, mrapi):
        self.backend = mfactory.BackendFactory()
        log = logging.getLogger()
        options = {"fix_unsynced": True,
                   "fix_stale": True,
                   "fix_orphans": True,
                   "fix_unsynced_nics": True,
                   "fix_unsynced_flavors": True}
        self.reconciler = reconciliation.BackendReconciler(self.backend,
                                                           options=options,
                                                           logger=log)

    def test_building_vm(self, mrapi):
        mrapi = self.reconciler.client
        vm1 = mfactory.VirtualMachineFactory(backend=self.backend,
                                             backendjobid=None,
                                             operstate="BUILD")
        self.reconciler.reconcile()
        # Assert not deleted
        vm1 = VirtualMachine.objects.get(id=vm1.id)
        self.assertFalse(vm1.deleted)
        self.assertEqual(vm1.operstate, "BUILD")

        vm1.created = vm1.created - timedelta(seconds=120)
        vm1.save()
        with mocked_quotaholder():
            self.reconciler.reconcile()
        vm1 = VirtualMachine.objects.get(id=vm1.id)
        self.assertEqual(vm1.operstate, "ERROR")

        vm1 = mfactory.VirtualMachineFactory(backend=self.backend,
                                             backendjobid=1,
                                             deleted=False,
                                             operstate="BUILD")
        vm1.backendtime = vm1.created - timedelta(seconds=120)
        vm1.backendjobid = 10
        vm1.save()
        for status in ["queued", "waiting", "running"]:
            mrapi.GetJobStatus.return_value = {"status": status}
            with mocked_quotaholder():
                self.reconciler.reconcile()
            vm1 = VirtualMachine.objects.get(id=vm1.id)
            self.assertFalse(vm1.deleted)
            self.assertEqual(vm1.operstate, "BUILD")

        mrapi.GetJobStatus.return_value = {"status": "error"}
        with mocked_quotaholder():
            self.reconciler.reconcile()
        vm1 = VirtualMachine.objects.get(id=vm1.id)
        self.assertFalse(vm1.deleted)
        self.assertEqual(vm1.operstate, "ERROR")

        for status in ["success", "cancelled"]:
            vm1.deleted = False
            vm1.save()
            mrapi.GetJobStatus.return_value = {"status": status}
            with mocked_quotaholder():
                self.reconciler.reconcile()
            vm1 = VirtualMachine.objects.get(id=vm1.id)
            self.assertTrue(vm1.deleted)
            self.assertEqual(vm1.operstate, "DESTROYED")

        vm1 = mfactory.VirtualMachineFactory(backend=self.backend,
                                             backendjobid=1,
                                             operstate="BUILD")
        vm1.backendtime = vm1.created - timedelta(seconds=120)
        vm1.backendjobid = 10
        vm1.save()
        cmrapi = self.reconciler.client
        cmrapi.GetInstances.return_value = \
            [{"name": vm1.backend_vm_id,
              "beparams": {"maxmem": 1024,
                           "minmem": 1024,
                           "vcpus": 4},
              "oper_state": False,
              "mtime": time(),
              "disk.sizes": [],
              "nic.ips": [],
              "nic.macs": [],
              "nic.networks": [],
              "tags": []}]
        mrapi.GetJobStatus.return_value = {"status": "running"}
        with mocked_quotaholder():
            self.reconciler.reconcile()
        vm1 = VirtualMachine.objects.get(id=vm1.id)
        self.assertEqual(vm1.operstate, "BUILD")
        mrapi.GetJobStatus.return_value = {"status": "error"}
        with mocked_quotaholder():
            self.reconciler.reconcile()
        vm1 = VirtualMachine.objects.get(id=vm1.id)
        self.assertEqual(vm1.operstate, "ERROR")

    def test_stale_server(self, mrapi):
        mrapi.GetInstances = []
        vm1 = mfactory.VirtualMachineFactory(backend=self.backend,
                                             deleted=False,
                                             operstate="ERROR")
        with mocked_quotaholder():
            self.reconciler.reconcile()
        vm1 = VirtualMachine.objects.get(id=vm1.id)
        self.assertTrue(vm1.deleted)

    def test_orphan_server(self, mrapi):
        cmrapi = self.reconciler.client
        mrapi().GetInstances.return_value =\
            [{"name": "%s22" % settings.BACKEND_PREFIX_ID,
              "beparams": {"maxmem": 1024,
                           "minmem": 1024,
                           "vcpus": 4},
              "oper_state": True,
              "mtime": time(),
              "disk.sizes": [],
              "nic.ips": [],
              "nic.macs": [],
              "nic.networks": [],
              "tags": []}]
        self.reconciler.reconcile()
        cmrapi.DeleteInstance\
            .assert_called_once_with("%s22" % settings.BACKEND_PREFIX_ID)

    def test_unsynced_operstate(self, mrapi):
        vm1 = mfactory.VirtualMachineFactory(backend=self.backend,
                                             deleted=False,
                                             operstate="STOPPED")
        mrapi().GetInstances.return_value =\
            [{"name": vm1.backend_vm_id,
              "beparams": {"maxmem": 1024,
                           "minmem": 1024,
                           "vcpus": 4},
              "oper_state": True,
              "mtime": time(),
              "disk.sizes": [],
              "nic.ips": [],
              "nic.macs": [],
              "nic.networks": [],
              "tags": []}]
        with mocked_quotaholder():
            self.reconciler.reconcile()
        vm1 = VirtualMachine.objects.get(id=vm1.id)
        self.assertEqual(vm1.operstate, "STARTED")

    def test_unsynced_flavor(self, mrapi):
        flavor1 = mfactory.FlavorFactory(cpu=2, ram=1024, disk=1,
                                         disk_template="drbd")
        flavor2 = mfactory.FlavorFactory(cpu=4, ram=2048, disk=1,
                                         disk_template="drbd")
        vm1 = mfactory.VirtualMachineFactory(backend=self.backend,
                                             deleted=False,
                                             flavor=flavor1,
                                             operstate="STARTED")
        mrapi().GetInstances.return_value =\
            [{"name": vm1.backend_vm_id,
              "beparams": {"maxmem": 2048,
                           "minmem": 2048,
                           "vcpus": 4},
              "oper_state": True,
              "mtime": time(),
              "disk.sizes": [],
              "nic.ips": [],
              "nic.macs": [],
              "nic.networks": [],
              "tags": []}]
        with mocked_quotaholder():
            self.reconciler.reconcile()
        vm1 = VirtualMachine.objects.get(id=vm1.id)
        self.assertEqual(vm1.flavor, flavor2)
        self.assertEqual(vm1.operstate, "STARTED")

    def test_unsynced_nics(self, mrapi):
        network1 = mfactory.NetworkFactory(subnet="10.0.0.0/24")
        network2 = mfactory.NetworkFactory(subnet="192.168.2.0/24")
        vm1 = mfactory.VirtualMachineFactory(backend=self.backend,
                                             deleted=False,
                                             operstate="STOPPED")
        mfactory.NetworkInterfaceFactory(machine=vm1, network=network1,
                                         ipv4="10.0.0.0")
        mrapi().GetInstances.return_value =\
            [{"name": vm1.backend_vm_id,
              "beparams": {"maxmem": 2048,
                           "minmem": 2048,
                           "vcpus": 4},
              "oper_state": True,
              "mtime": time(),
              "disk.sizes": [],
              "nic.ips": ["192.168.2.1"],
              "nic.macs": ["aa:00:bb:cc:dd:ee"],
              "nic.networks": [network2.backend_id],
              "tags": []}]
        with mocked_quotaholder():
            self.reconciler.reconcile()
        vm1 = VirtualMachine.objects.get(id=vm1.id)
        self.assertEqual(vm1.operstate, "STARTED")
        nic = vm1.nics.all()[0]
        self.assertEqual(nic.network, network2)
        self.assertEqual(nic.ipv4, "192.168.2.1")
        self.assertEqual(nic.mac, "aa:00:bb:cc:dd:ee")


from synnefo.logic.test.rapi_pool_tests import *
from synnefo.logic.test.utils_tests import *
from synnefo.logic.test.networks import *
from synnefo.logic.test.servers import *
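
# The wildcard imports above pull the remaining logic test suites (RAPI pool,
# utils, networks, servers) into this module, so that the Django test runner
# discovers them together with the tests defined here.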