snf-cyclades-app / synnefo / logic / backend.py @ 341c818e
# Copyright 2011 GRNET S.A. All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
# or implied, of GRNET S.A.

import json

from logging import getLogger
from django.conf import settings
from django.db import transaction
from datetime import datetime

from synnefo.db.models import (Backend, VirtualMachine, Network,
                               BackendNetwork, BACKEND_STATUSES)
from synnefo.logic import utils, ippool
from synnefo.api.faults import OverLimit
from synnefo.api.util import backend_public_networks, get_network_free_address
from synnefo.util.rapi import GanetiRapiClient

log = getLogger('synnefo.logic')


_firewall_tags = {
    'ENABLED': settings.GANETI_FIREWALL_ENABLED_TAG,
    'DISABLED': settings.GANETI_FIREWALL_DISABLED_TAG,
    'PROTECTED': settings.GANETI_FIREWALL_PROTECTED_TAG}

_reverse_tags = dict((v.split(':')[3], k) for k, v in _firewall_tags.items())


def create_client(hostname, port=5080, username=None, password=None):
    return GanetiRapiClient(hostname, port, username, password)


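# For illustration: _reverse_tags assumes each configured firewall tag has at
# least four colon-separated fields, since it keys on v.split(':')[3]. With a
# hypothetical tag of the form 'prefix:category:index:value', the fourth
# field 'value' is mapped back to whichever profile name produced it.
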
@transaction.commit_on_success
def process_op_status(vm, etime, jobid, opcode, status, logmsg):
    """Process a job progress notification from the backend

    Process an incoming message from the backend (currently Ganeti).
    Job notifications with a terminating status (success, error, or canceled)
    also update the operating state of the VM.

    """
    # See #1492, #1031, #1111 why this line has been removed
    #if (opcode not in [x[0] for x in VirtualMachine.BACKEND_OPCODES] or
    if status not in [x[0] for x in BACKEND_STATUSES]:
        raise VirtualMachine.InvalidBackendMsgError(opcode, status)

    vm.backendjobid = jobid
    vm.backendjobstatus = status
    vm.backendopcode = opcode
    vm.backendlogmsg = logmsg

    # Notifications of success change the operating state
    state_for_success = VirtualMachine.OPER_STATE_FROM_OPCODE.get(opcode, None)
    if status == 'success' and state_for_success is not None:
        utils.update_state(vm, state_for_success)
        # Set the deleted flag explicitly, to cater for admin-initiated
        # removals
        if opcode == 'OP_INSTANCE_REMOVE':
            release_instance_nics(vm)
            vm.deleted = True
            vm.nics.all().delete()

    # Special case: if OP_INSTANCE_CREATE fails --> ERROR
    if status in ('canceled', 'error') and opcode == 'OP_INSTANCE_CREATE':
        utils.update_state(vm, 'ERROR')

    # Special case: OP_INSTANCE_REMOVE fails for machines in ERROR,
    # when no instance exists at the Ganeti backend.
    # See ticket #799 for all the details.
    #
    if (status == 'error' and opcode == 'OP_INSTANCE_REMOVE'):
        vm.deleted = True
        vm.nics.all().delete()

    vm.backendtime = etime
    # Any other notification of failure leaves the operating state unchanged

    vm.save()


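# Rough sketch of how a terminating job notification flows through
# process_op_status() (the field values here are hypothetical):
#
#   process_op_status(vm, etime, jobid=1234, opcode='OP_INSTANCE_REMOVE',
#                     status='success', logmsg='...')
#
# records the job fields on the VM, releases and deletes its NICs, sets
# vm.deleted = True and saves, all within a single transaction.
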
@transaction.commit_on_success
def process_net_status(vm, etime, nics):
    """Process a net status notification from the backend

    Process an incoming message from the Ganeti backend,
    detailing the NIC configuration of a VM instance.

    Update the state of the VM in the DB accordingly.
    """

    # Release the IPs of the old NICs. Keep the returned Network objects:
    # multiple changes to the same network must go through the same object,
    # because the transaction is committed only when this function exits.
    networks = release_instance_nics(vm)

    new_nics = enumerate(nics)
    for i, new_nic in new_nics:
        network = new_nic.get('network', '')
        n = str(network)
        pk = utils.id_from_network_name(n)

        # Get the cached Network or fetch it from the DB
        if pk in networks:
            net = networks[pk]
        else:
            net = Network.objects.select_for_update().get(pk=pk)

        # Get the new NIC info
        mac = new_nic.get('mac', '')
        ipv4 = new_nic.get('ip', '')
        ipv6 = new_nic.get('ipv6', '')

        firewall = new_nic.get('firewall', '')
        firewall_profile = _reverse_tags.get(firewall, '')
        if not firewall_profile and net.public:
            firewall_profile = settings.DEFAULT_FIREWALL_PROFILE

        if ipv4:
            net.reserve_address(ipv4)

        vm.nics.create(
            network=net,
            index=i,
            mac=mac,
            ipv4=ipv4,
            ipv6=ipv6,
            firewall_profile=firewall_profile,
            dirty=False)

    vm.backendtime = etime
    vm.save()


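# A minimal sketch of the `nics` payload this function consumes; the keys
# match the lookups above, the values are hypothetical:
#
#   nics = [{'network': 'snf-net-1', 'mac': 'aa:00:00:10:20:30',
#            'ip': '192.168.0.2', 'ipv6': '', 'firewall': ''}]
#
# The 'network' value must follow the Synnefo backend naming scheme, since it
# is mapped back to a database primary key via utils.id_from_network_name().
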
def release_instance_nics(vm):
    """Release the addresses held by a VM's NICs and delete the NICs.

    Return the Network objects that were touched, keyed by network pk, so
    that callers can reuse them within the same transaction.

    """
    networks = {}

    for nic in vm.nics.all():
        pk = nic.network.pk
        # Get the cached Network or fetch it from the DB
        if pk in networks:
            net = networks[pk]
        else:
            # Get the network object in exclusive mode in order
            # to guarantee consistency of the address pool
            net = Network.objects.select_for_update().get(pk=pk)
            # Cache it, so later changes reuse the same locked object
            networks[pk] = net
        if nic.ipv4:
            net.release_address(nic.ipv4)
        nic.delete()

    return networks


@transaction.commit_on_success
def process_network_status(back_network, etime, jobid, opcode, status, logmsg):
    if status not in [x[0] for x in BACKEND_STATUSES]:
        return
        #raise Network.InvalidBackendMsgError(opcode, status)

    back_network.backendjobid = jobid
    back_network.backendjobstatus = status
    back_network.backendopcode = opcode
    back_network.backendlogmsg = logmsg

    # Notifications of success change the operating state
    state_for_success = BackendNetwork.OPER_STATE_FROM_OPCODE.get(opcode, None)
    if status == 'success' and state_for_success is not None:
        back_network.operstate = state_for_success
        if opcode == 'OP_NETWORK_REMOVE':
            back_network.deleted = True

    if status in ('canceled', 'error') and opcode == 'OP_NETWORK_CREATE':
        utils.update_state(back_network, 'ERROR')

    if (status == 'error' and opcode == 'OP_NETWORK_REMOVE'):
        back_network.deleted = True
        back_network.operstate = 'DELETED'

    back_network.save()


@transaction.commit_on_success
def process_create_progress(vm, etime, rprogress, wprogress):

    # XXX: This only uses the read progress for now.
    # Explore whether it would make sense to use the value of wprogress
    # somewhere.
    percentage = int(rprogress)

    # The percentage may exceed 100%, due to the way
    # snf-progress-monitor tracks bytes read by image handling processes
    percentage = 100 if percentage > 100 else percentage
    if percentage < 0:
        raise ValueError("Percentage cannot be negative")

    # FIXME: log a warning here, see #1033
    # if last_update > percentage:
    #     raise ValueError("Build percentage should increase monotonically "
    #                      "(old = %d, new = %d)" % (last_update, percentage))

    # This assumes that no message of type 'ganeti-create-progress' is going
    # to arrive once OP_INSTANCE_CREATE has succeeded for a Ganeti instance
    # and the instance is STARTED. What if the two messages are processed by
    # two separate dispatcher threads, and the 'ganeti-op-status' message for
    # successful creation gets processed before the 'ganeti-create-progress'
    # message? [vkoukis]
    #
    #if not vm.operstate == 'BUILD':
    #    raise VirtualMachine.IllegalState("VM is not in building state")

    vm.buildpercentage = percentage
    vm.backendtime = etime
    vm.save()


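# Example (hypothetical values): a 'ganeti-create-progress' message carrying
# rprogress='75' results in vm.buildpercentage = 75; anything above 100 is
# clamped to 100, and negative values raise ValueError.
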
def start_action(vm, action):
    """Update the state of a VM when a new action is initiated."""
    if action not in [x[0] for x in VirtualMachine.ACTIONS]:
        raise VirtualMachine.InvalidActionError(action)

    # No actions allowed on deleted VMs, and nothing besides destroy
    # on suspended VMs
    if vm.deleted:
        raise VirtualMachine.DeletedError

    # No actions on machines being built. They may be destroyed, however.
    if vm.operstate == 'BUILD' and action != 'DESTROY':
        raise VirtualMachine.BuildingError

    vm.action = action
    vm.backendjobid = None
    vm.backendopcode = None
    vm.backendjobstatus = None
    vm.backendlogmsg = None

    # Update the relevant flags if the VM is being suspended or destroyed.
    # Do not set the deleted flag here, see ticket #721.
    #
    # The deleted flag is set asynchronously, when an OP_INSTANCE_REMOVE
    # completes successfully. Hence, a server may be visible for some time
    # after a DELETE /servers/id returns HTTP 204.
    #
    if action == "DESTROY":
        # vm.deleted = True
        pass
    elif action == "SUSPEND":
        vm.suspended = True
    elif action == "START":
        vm.suspended = False
    vm.save()


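# Usage sketch (hypothetical): start_action(vm, 'START') validates the action,
# clears the backend job bookkeeping fields, sets vm.suspended = False and
# saves the VM; the actual Ganeti job is submitted by the caller afterwards
# (see startup_instance() below).
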
@transaction.commit_on_success
def create_instance(vm, flavor, image, password, personality):
    """`image` is a dictionary which should contain the keys:
    'backend_id', 'format' and 'metadata'.

    The 'metadata' value should itself be a dictionary.
    """

    if settings.PUBLIC_ROUTED_USE_POOL:
        (network, address) = allocate_public_address(vm)
        if address is None:
            raise OverLimit("Can not allocate IP for new machine."
                            " Public networks are full.")
        nic = {'ip': address, 'network': network.backend_id}
    else:
        nic = {'ip': 'pool', 'network': network.backend_id}

    if settings.IGNORE_FLAVOR_DISK_SIZES:
        if image['backend_id'].find("windows") >= 0:
            sz = 14000
        else:
            sz = 4000
    else:
        sz = flavor.disk * 1024

    # Handle arguments to CreateInstance() as a dictionary,
    # initialize it based on a deployment-specific value.
    # This enables the administrator to override deployment-specific
    # arguments, such as the disk template to use, name of os provider
    # and hypervisor-specific parameters at will (see Synnefo #785, #835).
    #
    kw = settings.GANETI_CREATEINSTANCE_KWARGS
    kw['mode'] = 'create'
    kw['name'] = vm.backend_vm_id
    # Defined in settings.GANETI_CREATEINSTANCE_KWARGS

    # Determine whether a provider parameter should be set in the disk
    # options. The current implementation supports providers only for the
    # ext template. To select a specific provider for an ext template, the
    # template name should be formatted as `ext_<provider_name>`.
    provider = None
    disk_template = flavor.disk_template
    if flavor.disk_template.startswith("ext"):
        disk_template, provider = flavor.disk_template.split("_", 1)

    kw['disk_template'] = disk_template
    kw['disks'] = [{"size": sz}]
    if provider:
        kw['disks'][0]['provider'] = provider

    kw['nics'] = [nic]
    if settings.GANETI_USE_HOTPLUG:
        kw['hotplug'] = True
    # Defined in settings.GANETI_CREATEINSTANCE_KWARGS
    # kw['os'] = settings.GANETI_OS_PROVIDER
    kw['ip_check'] = False
    kw['name_check'] = False
    # Do not specify a node explicitly, have
    # Ganeti use an iallocator instead
    #
    # kw['pnode'] = rapi.GetNodes()[0]
    kw['dry_run'] = settings.TEST

    kw['beparams'] = {
        'auto_balance': True,
        'vcpus': flavor.cpu,
        'memory': flavor.ram}

    kw['osparams'] = {
        'img_id': image['backend_id'],
        'img_passwd': password,
        'img_format': image['format']}
    if personality:
        kw['osparams']['img_personality'] = json.dumps(personality)

    kw['osparams']['img_properties'] = json.dumps(image['metadata'])

    # Defined in settings.GANETI_CREATEINSTANCE_KWARGS
    # kw['hvparams'] = dict(serial_console=False)

    return vm.client.CreateInstance(**kw)


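# For illustration, a rough sketch of the keyword arguments that end up in
# the CreateInstance() call for a hypothetical flavor (1 CPU, 1024 MB RAM,
# 20 GB plain disk); deployment-specific keys coming from
# GANETI_CREATEINSTANCE_KWARGS, such as 'os' and 'hvparams', are omitted:
#
#   kw = {'mode': 'create', 'name': vm.backend_vm_id,
#         'disk_template': 'plain', 'disks': [{'size': 20480}],
#         'nics': [{'ip': '203.0.113.5', 'network': 'snf-net-1'}],
#         'ip_check': False, 'name_check': False, 'dry_run': settings.TEST,
#         'beparams': {'auto_balance': True, 'vcpus': 1, 'memory': 1024},
#         'osparams': {'img_id': ..., 'img_passwd': ..., 'img_format': ...,
#                      'img_properties': '{...json...}'}}
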
def allocate_public_address(vm):
    """Allocate a public IP for a vm."""
    for network in backend_public_networks(vm.backend):
        try:
            address = get_network_free_address(network)
            return (network, address)
        except ippool.IPPool.IPPoolExhausted:
            pass
    return (None, None)


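# Usage sketch: allocate_public_address() walks the backend's public networks
# and returns the first (network, address) pair it can obtain; (None, None)
# means every public address pool is exhausted (see the OverLimit check in
# create_instance() above).
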
def delete_instance(vm):
    start_action(vm, 'DESTROY')
    vm.client.DeleteInstance(vm.backend_vm_id, dry_run=settings.TEST)


def reboot_instance(vm, reboot_type):
    assert reboot_type in ('soft', 'hard')
    vm.client.RebootInstance(vm.backend_vm_id, reboot_type,
                             dry_run=settings.TEST)
    log.info('Rebooting instance %s', vm.backend_vm_id)


def startup_instance(vm):
    start_action(vm, 'START')
    vm.client.StartupInstance(vm.backend_vm_id, dry_run=settings.TEST)


def shutdown_instance(vm):
    start_action(vm, 'STOP')
    vm.client.ShutdownInstance(vm.backend_vm_id, dry_run=settings.TEST)


def get_instance_console(vm):
    # RAPI GetInstanceConsole() returns endpoints to the vnc_bind_address,
    # which is a cluster-wide setting, either 0.0.0.0 or 127.0.0.1, and pretty
    # useless (see #783).
    #
    # Until this is fixed on the Ganeti side, construct a console info reply
    # directly.
    #
    # WARNING: This assumes that VNC runs on port network_port on
    #          the instance's primary node, and is probably
    #          hypervisor-specific.
    #
    console = {}
    console['kind'] = 'vnc'
    i = vm.client.GetInstance(vm.backend_vm_id)
    if i['hvparams']['serial_console']:
        raise Exception("hv parameter serial_console cannot be true")
    console['host'] = i['pnode']
    console['port'] = i['network_port']

    return console
    # return rapi.GetInstanceConsole(vm.backend_vm_id)


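# For illustration, the reply built above has the following shape
# (host and port values are hypothetical):
#
#   {'kind': 'vnc', 'host': 'node1.example.com', 'port': 11000}
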
def request_status_update(vm):
    return vm.client.GetInstanceInfo(vm.backend_vm_id)


def update_status(vm, status):
    utils.update_state(vm, status)


def create_network(network, backends=None):
    """Add and connect a network to backends.

    @param network: Network object
    @param backends: List of Backend objects. None defaults to all.

    """
    backend_jobs = _create_network(network, backends)
    connect_network(network, backend_jobs)
    return network


def _create_network(network, backends=None):
    """Add a network to backends.

    @param network: Network object
    @param backends: List of Backend objects. None defaults to all.

    """

    network_type = network.public and 'public' or 'private'
    if not backends:
        backends = Backend.objects.exclude(offline=True)

    tags = network.backend_tag
    if network.dhcp:
        tags.append('nfdhcpd')
    tags = ','.join(tags)

    backend_jobs = []
    for backend in backends:
        try:
            backend_network = BackendNetwork.objects.get(network=network,
                                                         backend=backend)
        except BackendNetwork.DoesNotExist:
            raise Exception("BackendNetwork for network '%s' in backend '%s'"
                            " does not exist" % (network.id, backend.id))
        job = backend.client.CreateNetwork(
            network_name=network.backend_id,
            network=network.subnet,
            gateway=network.gateway,
            network_type=network_type,
            mac_prefix=backend_network.mac_prefix,
            tags=tags)
        backend_jobs.append((backend, job))

    return backend_jobs


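# Flow sketch: create_network() calls _create_network(), which submits one
# CreateNetwork job per backend and returns (backend, job) pairs; these pairs
# are then fed to connect_network() below, so that each ConnectNetwork job
# depends on the corresponding network-creation job.
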
def connect_network(network, backend_jobs=None):
    """Connect a network to all nodegroups.

    @param network: Network object
    @param backend_jobs: List of tuples of the form (Backend, jobs) which are
                         the backends to connect the network and the jobs on
                         which the connect job depends.

    """

    if network.type in ('PUBLIC_ROUTED', 'CUSTOM_ROUTED'):
        mode = 'routed'
    else:
        mode = 'bridged'

    if not backend_jobs:
        backend_jobs = [(backend, []) for backend in
                        Backend.objects.exclude(offline=True)]

    for backend, job in backend_jobs:
        client = backend.client
        for group in client.GetGroups():
            client.ConnectNetwork(network.backend_id, group, mode,
                                  network.link, [job])


def connect_network_group(backend, network, group):
    """Connect a network to a specific nodegroup of a backend.

    """
    if network.type in ('PUBLIC_ROUTED', 'CUSTOM_ROUTED'):
        mode = 'routed'
    else:
        mode = 'bridged'

    return backend.client.ConnectNetwork(network.backend_id, group, mode,
                                         network.link)


def delete_network(network, backends=None):
    """Disconnect and remove a network from backends.

    @param network: Network object
    @param backends: List of Backend objects. None defaults to all.

    """
    backend_jobs = disconnect_network(network, backends)
    _delete_network(network, backend_jobs)


def disconnect_network(network, backends=None):
    """Disconnect a network from all nodegroups.

    @param network: Network object
    @param backends: List of Backend objects. None defaults to all.

    """

    if not backends:
        backends = Backend.objects.exclude(offline=True)

    backend_jobs = []
    for backend in backends:
        client = backend.client
        jobs = []
        for group in client.GetGroups():
            job = client.DisconnectNetwork(network.backend_id, group)
            jobs.append(job)
        backend_jobs.append((backend, jobs))

    return backend_jobs


def disconnect_from_network(vm, nic):
    """Disconnect a virtual machine from a network by removing its NIC.

    @param vm: VirtualMachine object
    @param nic: the NIC to remove

    """

    op = [('remove', nic.index, {})]
    return vm.client.ModifyInstance(vm.backend_vm_id, nics=op,
                                    hotplug=settings.GANETI_USE_HOTPLUG,
                                    dry_run=settings.TEST)


def _delete_network(network, backend_jobs=None):
    if not backend_jobs:
        backend_jobs = [(backend, []) for backend in
                        Backend.objects.exclude(offline=True)]
    for backend, jobs in backend_jobs:
        backend.client.DeleteNetwork(network.backend_id, jobs)


def connect_to_network(vm, network, address):
    """Connect a virtual machine to a network.

    @param vm: VirtualMachine object
    @param network: Network object

    """

    # ip = network.dhcp and 'pool' or None

    nic = {'ip': address, 'network': network.backend_id}
    vm.client.ModifyInstance(vm.backend_vm_id, nics=[('add', nic)],
                             hotplug=settings.GANETI_USE_HOTPLUG,
                             dry_run=settings.TEST)


def set_firewall_profile(vm, profile):
    try:
        tag = _firewall_tags[profile]
    except KeyError:
        raise ValueError("Unsupported Firewall Profile: %s" % profile)

    client = vm.client
    # Delete all firewall tags
    for t in _firewall_tags.values():
        client.DeleteInstanceTags(vm.backend_vm_id, [t], dry_run=settings.TEST)

    client.AddInstanceTags(vm.backend_vm_id, [tag], dry_run=settings.TEST)

    # XXX NOP ModifyInstance call to force process_net_status to run
    # on the dispatcher
    vm.client.ModifyInstance(vm.backend_vm_id,
                             os_name=settings.GANETI_CREATEINSTANCE_KWARGS['os'])


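# Usage sketch (hypothetical): set_firewall_profile(vm, 'PROTECTED') removes
# every known firewall tag from the instance, adds the tag configured for
# 'PROTECTED', and then issues a no-op ModifyInstance so that the resulting
# net-status notification is processed by the dispatcher.
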
def get_ganeti_instances(backend=None, bulk=False):
    Instances = [c.client.GetInstances(bulk=bulk)
                 for c in get_backends(backend)]
    return reduce(list.__add__, Instances, [])


def get_ganeti_nodes(backend=None, bulk=False):
    Nodes = [c.client.GetNodes(bulk=bulk) for c in get_backends(backend)]
    return reduce(list.__add__, Nodes, [])


def get_ganeti_jobs(backend=None, bulk=False):
    Jobs = [c.client.GetJobs(bulk=bulk) for c in get_backends(backend)]
    return reduce(list.__add__, Jobs, [])

##
##
##


def get_backends(backend=None):
    if backend:
        return [backend]
    return Backend.objects.filter(offline=False)


def get_physical_resources(backend):
    """Get the physical resources of a backend.

    Get the resources of a backend as reported by the backend (not the db).

    """
    nodes = get_ganeti_nodes(backend, bulk=True)
    attr = ['mfree', 'mtotal', 'dfree', 'dtotal', 'pinst_cnt', 'ctotal']
    res = {}
    for a in attr:
        res[a] = 0
    for n in nodes:
        # Filter out drained, offline and not vm_capable nodes since they will
        # not take part in the vm allocation process
        if n['vm_capable'] and not n['drained'] and not n['offline']\
           and n['cnodes']:
            for a in attr:
                res[a] += int(n[a])
    return res


def update_resources(backend, resources=None):
    """Update the state of the backend resources in db.

    """

    if not resources:
        resources = get_physical_resources(backend)

    backend.mfree = resources['mfree']
    backend.mtotal = resources['mtotal']
    backend.dfree = resources['dfree']
    backend.dtotal = resources['dtotal']
    backend.pinst_cnt = resources['pinst_cnt']
    backend.ctotal = resources['ctotal']
    backend.updated = datetime.now()
    backend.save()


def get_memory_from_instances(backend):
    """Get the memory that is used from instances.

    Get the used memory of a backend. Note: this may differ from the real
    memory used, due to KVM's memory de-duplication.

    """
    instances = backend.client.GetInstances(bulk=True)
    mem = 0
    for i in instances:
        mem += i['oper_ram']
    return mem


|
683 |
##
|
684 |
## Synchronized operations for reconciliation
|
685 |
##
|
686 |
|
687 |
|
688 |
def create_network_synced(network, backend): |
689 |
result = _create_network_synced(network, backend) |
690 |
if result[0] != 'success': |
691 |
return result
|
692 |
result = connect_network_synced(network, backend) |
693 |
return result
|
694 |
|
695 |
|
696 |
def _create_network_synced(network, backend): |
697 |
client = backend.client |
698 |
|
699 |
backend_jobs = _create_network(network, [backend]) |
700 |
(_, job) = backend_jobs[0]
|
701 |
return wait_for_job(client, job)
|
702 |
|
703 |
|
704 |
def connect_network_synced(network, backend): |
705 |
if network.type in ('PUBLIC_ROUTED', 'CUSTOM_ROUTED'): |
706 |
mode = 'routed'
|
707 |
else:
|
708 |
mode = 'bridged'
|
709 |
client = backend.client |
710 |
|
711 |
for group in client.GetGroups(): |
712 |
job = client.ConnectNetwork(network.backend_id, group, mode, |
713 |
network.link) |
714 |
result = wait_for_job(client, job) |
715 |
if result[0] != 'success': |
716 |
return result
|
717 |
|
718 |
return result
|
719 |
|
720 |
|
721 |
def wait_for_job(client, jobid): |
722 |
result = client.WaitForJobChange(jobid, ['status', 'opresult'], None, None) |
723 |
status = result['job_info'][0] |
724 |
while status not in ['success', 'error', 'cancel']: |
725 |
result = client.WaitForJobChange(jobid, ['status', 'opresult'], |
726 |
[result], None)
|
727 |
status = result['job_info'][0] |
728 |
|
729 |
if status == 'success': |
730 |
return (status, None) |
731 |
else:
|
732 |
error = result['job_info'][1] |
733 |
return (status, error)
|