# snf-cyclades-app/synnefo/logic/backend.py @ 2cd3f389
#
# Copyright 2011 GRNET S.A. All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
#   1. Redistributions of source code must retain the above
#      copyright notice, this list of conditions and the following
#      disclaimer.
#
#   2. Redistributions in binary form must reproduce the above
#      copyright notice, this list of conditions and the following
#      disclaimer in the documentation and/or other materials
#      provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
# or implied, of GRNET S.A.
from django.conf import settings
from django.db import transaction
from datetime import datetime

from synnefo.db.models import (Backend, VirtualMachine, Network,
                               BackendNetwork, BACKEND_STATUSES,
                               pooled_rapi_client, VirtualMachineDiagnostic)
from synnefo.logic import utils
from synnefo import quotas
from synnefo.api.util import release_resource
from synnefo.util.mac2eui64 import mac2eui64

from logging import getLogger
log = getLogger(__name__)


_firewall_tags = {
    'ENABLED': settings.GANETI_FIREWALL_ENABLED_TAG,
    'DISABLED': settings.GANETI_FIREWALL_DISABLED_TAG,
    'PROTECTED': settings.GANETI_FIREWALL_PROTECTED_TAG}

_reverse_tags = dict((v.split(':')[3], k) for k, v in _firewall_tags.items())
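# Sketch of the expected tag layout (the exact tag values are deployment
# settings, so this one is hypothetical): with
# GANETI_FIREWALL_ENABLED_TAG = 'synnefo:network:0:enabled', splitting on ':'
# and keeping the fourth field gives _reverse_tags['enabled'] == 'ENABLED'.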


@transaction.commit_on_success
def process_op_status(vm, etime, jobid, opcode, status, logmsg, nics=None):
    """Process a job progress notification from the backend

    Process an incoming message from the backend (currently Ganeti).
    Job notifications with a terminating status (success, error, or canceled)
    also update the operating state of the VM.

    """
    # See #1492, #1031, #1111 why this line has been removed
    #if (opcode not in [x[0] for x in VirtualMachine.BACKEND_OPCODES] or
    if status not in [x[0] for x in BACKEND_STATUSES]:
        raise VirtualMachine.InvalidBackendMsgError(opcode, status)

    vm.backendjobid = jobid
    vm.backendjobstatus = status
    vm.backendopcode = opcode
    vm.backendlogmsg = logmsg

    # Notifications of success change the operating state
    state_for_success = VirtualMachine.OPER_STATE_FROM_OPCODE.get(opcode, None)
    if status == 'success' and state_for_success is not None:
        vm.operstate = state_for_success

    # Update the NICs of the VM
    if status == "success" and nics is not None:
        _process_net_status(vm, etime, nics)

    # Special case: if OP_INSTANCE_CREATE fails --> ERROR
    if opcode == 'OP_INSTANCE_CREATE' and status in ('canceled', 'error'):
        vm.operstate = 'ERROR'
        vm.backendtime = etime
    elif opcode == 'OP_INSTANCE_REMOVE':
        # Set the deleted flag explicitly, to cater for admin-initiated
        # removals. Special case: OP_INSTANCE_REMOVE fails for machines in
        # ERROR, when no instance exists at the Ganeti backend.
        # See ticket #799 for all the details.
        if status == 'success' or (status == 'error' and
                                   vm.operstate == 'ERROR'):
            _process_net_status(vm, etime, nics=[])
            vm.deleted = True
            vm.operstate = state_for_success
            vm.backendtime = etime
            # Issue and accept commission to Quotaholder
            quotas.issue_and_accept_commission(vm, delete=True)

    # Update backendtime only for jobs that have been successfully completed,
    # since only these jobs update the state of the VM. Otherwise a "race
    # condition" may occur when a successful job (e.g. OP_INSTANCE_REMOVE)
    # completes before an error job and the messages arrive in reversed order.
    if status == 'success':
        vm.backendtime = etime

    vm.save()
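
# Illustrative only: assuming OPER_STATE_FROM_OPCODE maps
# 'OP_INSTANCE_STARTUP' to 'STARTED' (as the Synnefo models suggest), a call
# such as
#
#   process_op_status(vm, etime, jobid=1234,
#                     opcode='OP_INSTANCE_STARTUP', status='success',
#                     logmsg='')
#
# records the job metadata on the VM and flips vm.operstate to 'STARTED'.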


@transaction.commit_on_success
def process_net_status(vm, etime, nics):
    """Wrap _process_net_status inside a transaction."""
    _process_net_status(vm, etime, nics)


def _process_net_status(vm, etime, nics):
    """Process a net status notification from the backend

    Process an incoming message from the Ganeti backend,
    detailing the NIC configuration of a VM instance.

    Update the state of the VM in the DB accordingly.
    """

    ganeti_nics = process_ganeti_nics(nics)
    if not nics_changed(vm.nics.order_by('index'), ganeti_nics):
        log.debug("NICs for VM %s have not changed", vm)
        return

    # Get an X-Lock on the backend before getting an X-Lock on the network IP
    # pools, to guarantee that no deadlock will occur with the Backend
    # allocator.
    Backend.objects.select_for_update().get(id=vm.backend_id)

    release_instance_nics(vm)

    for nic in ganeti_nics:
        ipv4 = nic.get('ipv4', '')
        net = nic['network']
        if ipv4:
            net.reserve_address(ipv4)

        nic['dirty'] = False
        vm.nics.create(**nic)
        # Dummy-save the network, because the UI uses changed-since for VMs
        # and Networks in order to show the VM NICs
        net.save()

    vm.backendtime = etime
    vm.save()


def process_ganeti_nics(ganeti_nics):
    """Process a NIC dict from the Ganeti hooks."""
    new_nics = []
    for i, new_nic in enumerate(ganeti_nics):
        network = new_nic.get('network', '')
        n = str(network)
        pk = utils.id_from_network_name(n)

        net = Network.objects.get(pk=pk)

        # Get the new NIC info
        mac = new_nic.get('mac', '')
        ipv4 = new_nic.get('ip', '')
        if net.subnet6:
            ipv6 = mac2eui64(mac, net.subnet6)
        else:
            ipv6 = ''

        firewall = new_nic.get('firewall', '')
        firewall_profile = _reverse_tags.get(firewall, '')
        if not firewall_profile and net.public:
            firewall_profile = settings.DEFAULT_FIREWALL_PROFILE

        nic = {
            'index': i,
            'network': net,
            'mac': mac,
            'ipv4': ipv4,
            'ipv6': ipv6,
            'firewall_profile': firewall_profile,
            'state': 'ACTIVE'}

        new_nics.append(nic)
    return new_nics
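
# A minimal sketch of the input, under the assumption that backend networks
# are named after their Network pk (e.g. 'snf-net-42' -> pk 42), as
# id_from_network_name implies:
#
#   [{'network': 'snf-net-42', 'mac': 'aa:00:00:55:44:33',
#     'ip': '192.0.2.10', 'firewall': ''}]
#
# For an IPv6-enabled network with subnet6 2001:db8::/64, mac2eui64 would
# derive 2001:db8::a800:ff:fe55:4433 from that MAC (EUI-64: flip the
# universal/local bit of the first octet and insert ff:fe in the middle).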


def nics_changed(old_nics, new_nics):
    """Return True if NICs have changed in any way."""
    if len(old_nics) != len(new_nics):
        return True
    fields = ["ipv4", "ipv6", "mac", "firewall_profile", "index", "network"]
    for old_nic, new_nic in zip(old_nics, new_nics):
        for field in fields:
            if getattr(old_nic, field) != new_nic[field]:
                return True
    return False


def release_instance_nics(vm):
    for nic in vm.nics.all():
        net = nic.network
        if nic.ipv4:
            net.release_address(nic.ipv4)
        nic.delete()
        net.save()


@transaction.commit_on_success
def process_network_status(back_network, etime, jobid, opcode, status, logmsg):
    if status not in [x[0] for x in BACKEND_STATUSES]:
        raise Network.InvalidBackendMsgError(opcode, status)

    back_network.backendjobid = jobid
    back_network.backendjobstatus = status
    back_network.backendopcode = opcode
    back_network.backendlogmsg = logmsg

    # Notifications of success change the operating state
    state_for_success = BackendNetwork.OPER_STATE_FROM_OPCODE.get(opcode, None)
    if status == 'success' and state_for_success is not None:
        back_network.operstate = state_for_success

    if status in ('canceled', 'error') and opcode == 'OP_NETWORK_ADD':
        back_network.operstate = 'ERROR'
        back_network.backendtime = etime

    if opcode == 'OP_NETWORK_REMOVE':
        if status == 'success' or (status == 'error' and
                                   back_network.operstate == 'ERROR'):
            back_network.operstate = state_for_success
            back_network.deleted = True
            back_network.backendtime = etime

    if status == 'success':
        back_network.backendtime = etime
    back_network.save()
    # The state of the parent Network must also be updated
    update_network_state(back_network.network)


def update_network_state(network):
    """Update the state of a Network based on BackendNetwork states.

    Update the state of a Network based on the operstate of the network in
    each of the backends in which it exists.

    The state of the network is:
    * ACTIVE: if it is 'ACTIVE' in at least one backend.
    * DELETED: if it is 'DELETED' in all backends in which it has been
      created.

    This function also releases the resources (MAC prefix or bridge) and the
    quotas for the network.

    """
    if network.deleted:
        # Network has already been deleted. Just assert that its state is
        # also DELETED.
        if not network.state == "DELETED":
            network.state = "DELETED"
            network.save()
        return

    backend_states = [s.operstate for s in network.backend_networks.all()]
    if not backend_states and network.action != "DESTROY":
        if network.state != "ACTIVE":
            network.state = "ACTIVE"
            network.save()
        return

    # Network is deleted when all BackendNetworks go to "DELETED" operstate
    deleted = reduce(lambda x, y: x == y and "DELETED", backend_states,
                     "DELETED")
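    # Note on the reduce above: it evaluates to the (truthy) string "DELETED"
    # only if every element of backend_states equals "DELETED"; the first
    # non-matching state makes the lambda return False, which then propagates
    # through all remaining iterations.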

    # Release the resources on the deletion of the Network
    if deleted:
        log.info("Network %r deleted. Releasing link %r mac_prefix %r",
                 network.id, network.link, network.mac_prefix)
        network.deleted = True
        network.state = "DELETED"
        if network.mac_prefix:
            if network.FLAVORS[network.flavor]["mac_prefix"] == "pool":
                release_resource(res_type="mac_prefix",
                                 value=network.mac_prefix)
        if network.link:
            if network.FLAVORS[network.flavor]["link"] == "pool":
                release_resource(res_type="bridge", value=network.link)

        # Issue commission
        if network.userid:
            quotas.issue_and_accept_commission(network, delete=True)
        elif not network.public:
            log.warning("Network %s does not have an owner!", network.id)
    network.save()


@transaction.commit_on_success
def process_network_modify(back_network, etime, jobid, opcode, status,
                           add_reserved_ips, remove_reserved_ips):
    assert (opcode == "OP_NETWORK_SET_PARAMS")
    if status not in [x[0] for x in BACKEND_STATUSES]:
        raise Network.InvalidBackendMsgError(opcode, status)

    back_network.backendjobid = jobid
    back_network.backendjobstatus = status
    back_network.backendopcode = opcode

    if add_reserved_ips or remove_reserved_ips:
        net = back_network.network
        pool = net.get_pool()
        if add_reserved_ips:
            for ip in add_reserved_ips:
                pool.reserve(ip, external=True)
        if remove_reserved_ips:
            for ip in remove_reserved_ips:
                pool.put(ip, external=True)
        pool.save()

    if status == 'success':
        back_network.backendtime = etime
    back_network.save()


@transaction.commit_on_success
def process_create_progress(vm, etime, progress):

    percentage = int(progress)

    # The percentage may exceed 100%, due to the way
    # snf-image:copy-progress tracks bytes read by image handling processes
    percentage = 100 if percentage > 100 else percentage
    if percentage < 0:
        raise ValueError("Percentage cannot be negative")

    # FIXME: log a warning here, see #1033
    # if last_update > percentage:
    #     raise ValueError("Build percentage should increase monotonically "
    #                      "(old = %d, new = %d)" % (last_update, percentage))

    # This assumes that no message of type 'ganeti-create-progress' is going
    # to arrive once OP_INSTANCE_CREATE has succeeded for a Ganeti instance
    # and the instance is STARTED. What if the two messages are processed by
    # two separate dispatcher threads, and the 'ganeti-op-status' message for
    # successful creation gets processed before the 'ganeti-create-progress'
    # message? [vkoukis]
    #
    #if not vm.operstate == 'BUILD':
    #    raise VirtualMachine.IllegalState("VM is not in building state")

    vm.buildpercentage = percentage
    vm.backendtime = etime
    vm.save()


def create_instance_diagnostic(vm, message, source, level="DEBUG", etime=None,
                               details=None):
    """
    Create a virtual machine instance diagnostic entry.

    :param vm: VirtualMachine instance to create the diagnostic for.
    :param message: Diagnostic message.
    :param source: Diagnostic source identifier (e.g. image-helper).
    :param level: Diagnostic level (`DEBUG`, `INFO`, `WARNING`, `ERROR`).
    :param etime: The time the message occurred (if available).
    :param details: Additional details or debug information.
    """
    VirtualMachineDiagnostic.objects.create_for_vm(vm, level, source=source,
                                                   source_date=etime,
                                                   message=message,
                                                   details=details)


def create_instance(vm, public_nic, flavor, image):
    """`image` is a dictionary which should contain the keys:
    'backend_id', 'format' and 'metadata'

    The metadata value should be a dictionary.
    """

    # Handle the arguments to CreateInstance() as a dictionary,
    # and initialize it based on a deployment-specific value.
    # This enables the administrator to override deployment-specific
    # arguments, such as the disk template to use, the name of the os
    # provider, and hypervisor-specific parameters, at will (see Synnefo
    # #785, #835).
    #
    kw = vm.backend.get_create_params()
    kw['mode'] = 'create'
    kw['name'] = vm.backend_vm_id
    # Defined in settings.GANETI_CREATEINSTANCE_KWARGS

    kw['disk_template'] = flavor.disk_template
    kw['disks'] = [{"size": flavor.disk * 1024}]
    provider = flavor.disk_provider
    if provider:
        kw['disks'][0]['provider'] = provider
        kw['disks'][0]['origin'] = flavor.disk_origin

    kw['nics'] = [public_nic]
    if vm.backend.use_hotplug():
        kw['hotplug'] = True
    # Defined in settings.GANETI_CREATEINSTANCE_KWARGS
    # kw['os'] = settings.GANETI_OS_PROVIDER
    kw['ip_check'] = False
    kw['name_check'] = False

    # Do not specify a node explicitly; have
    # Ganeti use an iallocator instead
    #kw['pnode'] = rapi.GetNodes()[0]

    kw['dry_run'] = settings.TEST

    kw['beparams'] = {
        'auto_balance': True,
        'vcpus': flavor.cpu,
        'memory': flavor.ram}

    kw['osparams'] = {
        'config_url': vm.config_url,
        # Store the image id and format in Ganeti
        'img_id': image['backend_id'],
        'img_format': image['format']}

    # Defined in settings.GANETI_CREATEINSTANCE_KWARGS
    # kw['hvparams'] = dict(serial_console=False)

    log.debug("Creating instance %s", utils.hide_pass(kw))
    with pooled_rapi_client(vm) as client:
        return client.CreateInstance(**kw)
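
# A minimal sketch of the expected `image` argument (the identifier shown is
# hypothetical):
#
#   image = {'backend_id': 'debian_base-7.0-x86_64',
#            'format': 'diskdump',
#            'metadata': {'OS': 'debian'}}
#   job_id = create_instance(vm, public_nic, flavor, image)
#
# Only 'backend_id' and 'format' are forwarded to Ganeti (as the osparams
# img_id and img_format above).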


def delete_instance(vm):
    with pooled_rapi_client(vm) as client:
        return client.DeleteInstance(vm.backend_vm_id, dry_run=settings.TEST)


def reboot_instance(vm, reboot_type):
    assert reboot_type in ('soft', 'hard')
    with pooled_rapi_client(vm) as client:
        return client.RebootInstance(vm.backend_vm_id, reboot_type,
                                     dry_run=settings.TEST)


def startup_instance(vm):
    with pooled_rapi_client(vm) as client:
        return client.StartupInstance(vm.backend_vm_id, dry_run=settings.TEST)


def shutdown_instance(vm):
    with pooled_rapi_client(vm) as client:
        return client.ShutdownInstance(vm.backend_vm_id, dry_run=settings.TEST)


def resize_instance(vm, vcpus, memory):
    beparams = {"vcpus": int(vcpus),
                "minmem": int(memory),
                "maxmem": int(memory)}
    with pooled_rapi_client(vm) as client:
        return client.ModifyInstance(vm.backend_vm_id, beparams=beparams)
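
# Illustrative call, assuming memory is given in MiB as elsewhere in this
# module (flavor.ram): resize_instance(vm, vcpus=2, memory=2048) submits a
# ModifyInstance job; setting minmem == maxmem pins the instance to a fixed
# memory size instead of a ballooning range.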


def get_instance_console(vm):
    # RAPI GetInstanceConsole() returns endpoints to the vnc_bind_address,
    # which is a cluster-wide setting, either 0.0.0.0 or 127.0.0.1, and pretty
    # useless (see #783).
    #
    # Until this is fixed on the Ganeti side, construct a console info reply
    # directly.
    #
    # WARNING: This assumes that VNC runs on port network_port on
    #          the instance's primary node, and is probably
    #          hypervisor-specific.
    #
    log.debug("Getting console for vm %s", vm)

    console = {}
    console['kind'] = 'vnc'

    with pooled_rapi_client(vm) as client:
        i = client.GetInstance(vm.backend_vm_id)

    if vm.backend.hypervisor == "kvm" and i['hvparams']['serial_console']:
        raise Exception("hv parameter serial_console cannot be true")
    console['host'] = i['pnode']
    console['port'] = i['network_port']

    return console
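
# The reply is a plain dict, e.g. (hypothetical values):
#
#   {'kind': 'vnc', 'host': 'node1.example.com', 'port': 11000}
#
# i.e. the VNC endpoint is the instance's primary node plus its Ganeti
# network_port.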


def get_instance_info(vm):
    with pooled_rapi_client(vm) as client:
        return client.GetInstanceInfo(vm.backend_vm_id)


def create_network(network, backend, connect=True):
    """Create a network in a Ganeti backend"""
    log.debug("Creating network %s in backend %s", network, backend)

    job_id = _create_network(network, backend)

    if connect:
        job_ids = connect_network(network, backend, depends=[job_id])
        return job_ids
    else:
        return [job_id]


def _create_network(network, backend):
    """Create a network."""

    network_type = network.public and 'public' or 'private'

    tags = network.backend_tag
    if network.dhcp:
        tags.append('nfdhcpd')

    if network.public:
        conflicts_check = True
    else:
        conflicts_check = False

    try:
        bn = BackendNetwork.objects.get(network=network, backend=backend)
        mac_prefix = bn.mac_prefix
    except BackendNetwork.DoesNotExist:
        raise Exception("BackendNetwork for network '%s' in backend '%s'"
                        " does not exist" % (network.id, backend.id))

    with pooled_rapi_client(backend) as client:
        return client.CreateNetwork(network_name=network.backend_id,
                                    network=network.subnet,
                                    network6=network.subnet6,
                                    gateway=network.gateway,
                                    gateway6=network.gateway6,
                                    network_type=network_type,
                                    mac_prefix=mac_prefix,
                                    conflicts_check=conflicts_check,
                                    tags=tags)


def connect_network(network, backend, depends=[], group=None):
    """Connect a network to nodegroups."""
    log.debug("Connecting network %s to backend %s", network, backend)

    if network.public:
        conflicts_check = True
    else:
        conflicts_check = False

    depends = [[job, ["success", "error", "canceled"]] for job in depends]
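    # Each entry now has the Ganeti job-dependency form
    # [job_id, [<acceptable final statuses>]]: the new job starts only after
    # the listed job reaches one of "success", "error" or "canceled".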
    with pooled_rapi_client(backend) as client:
        groups = [group] if group is not None else client.GetGroups()
        job_ids = []
        for group in groups:
            job_id = client.ConnectNetwork(network.backend_id, group,
                                           network.mode, network.link,
                                           conflicts_check,
                                           depends=depends)
            job_ids.append(job_id)
    return job_ids


def delete_network(network, backend, disconnect=True):
    log.debug("Deleting network %s from backend %s", network, backend)

    depends = []
    if disconnect:
        depends = disconnect_network(network, backend)
    _delete_network(network, backend, depends=depends)


def _delete_network(network, backend, depends=[]):
    depends = [[job, ["success", "error", "canceled"]] for job in depends]
    with pooled_rapi_client(backend) as client:
        return client.DeleteNetwork(network.backend_id, depends)


def disconnect_network(network, backend, group=None):
    log.debug("Disconnecting network %s from backend %s", network, backend)

    with pooled_rapi_client(backend) as client:
        groups = [group] if group is not None else client.GetGroups()
        job_ids = []
        for group in groups:
            job_id = client.DisconnectNetwork(network.backend_id, group)
            job_ids.append(job_id)
    return job_ids


def connect_to_network(vm, network, address=None):
    backend = vm.backend
    network = Network.objects.select_for_update().get(id=network.id)
    bnet, created = BackendNetwork.objects.get_or_create(backend=backend,
                                                         network=network)
    depend_jobs = []
    if bnet.operstate != "ACTIVE":
        depend_jobs = create_network(network, backend, connect=True)

    depends = [[job, ["success", "error", "canceled"]] for job in depend_jobs]

    nic = {'ip': address, 'network': network.backend_id}

    log.debug("Connecting vm %s to network %s(%s)", vm, network, address)

    with pooled_rapi_client(vm) as client:
        return client.ModifyInstance(vm.backend_vm_id, nics=[('add', nic)],
                                     hotplug=vm.backend.use_hotplug(),
                                     depends=depends,
                                     dry_run=settings.TEST)


def disconnect_from_network(vm, nic):
    op = [('remove', nic.index, {})]

    log.debug("Removing nic of VM %s, with index %s", vm, str(nic.index))

    with pooled_rapi_client(vm) as client:
        return client.ModifyInstance(vm.backend_vm_id, nics=op,
                                     hotplug=vm.backend.use_hotplug(),
                                     dry_run=settings.TEST)


def set_firewall_profile(vm, profile):
    try:
        tag = _firewall_tags[profile]
    except KeyError:
        raise ValueError("Unsupported Firewall Profile: %s" % profile)

    log.debug("Setting tag of VM %s to %s", vm, profile)

    with pooled_rapi_client(vm) as client:
        # Delete all firewall tags
        for t in _firewall_tags.values():
            client.DeleteInstanceTags(vm.backend_vm_id, [t],
                                      dry_run=settings.TEST)

        client.AddInstanceTags(vm.backend_vm_id, [tag], dry_run=settings.TEST)

        # XXX NOP ModifyInstance call to force process_net_status to run
        # on the dispatcher
        os_name = settings.GANETI_CREATEINSTANCE_KWARGS['os']
        client.ModifyInstance(vm.backend_vm_id,
                              os_name=os_name)


def get_instances(backend, bulk=True):
    with pooled_rapi_client(backend) as c:
        return c.GetInstances(bulk=bulk)


def get_nodes(backend, bulk=True):
    with pooled_rapi_client(backend) as c:
        return c.GetNodes(bulk=bulk)


def get_jobs(backend):
    with pooled_rapi_client(backend) as c:
        return c.GetJobs()


def get_physical_resources(backend):
    """Get the physical resources of a backend.

    Get the resources of a backend as reported by the backend (not the db).

    """
    nodes = get_nodes(backend, bulk=True)
    attr = ['mfree', 'mtotal', 'dfree', 'dtotal', 'pinst_cnt', 'ctotal']
    res = {}
    for a in attr:
        res[a] = 0
    for n in nodes:
        # Filter out drained, offline and non-vm_capable nodes, since they
        # will not take part in the VM allocation process
        can_host_vms = n['vm_capable'] and not (n['drained'] or n['offline'])
        if can_host_vms and n['cnodes']:
            for a in attr:
                res[a] += int(n[a])
    return res
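
# Shape of the aggregate (the numbers are hypothetical): each field is summed
# over all usable nodes, e.g.
#
#   {'mfree': 24576, 'mtotal': 65536, 'dfree': 409600, 'dtotal': 819200,
#    'pinst_cnt': 13, 'ctotal': 32}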


def update_resources(backend, resources=None):
    """Update the state of the backend resources in the db."""

    if not resources:
        resources = get_physical_resources(backend)

    backend.mfree = resources['mfree']
    backend.mtotal = resources['mtotal']
    backend.dfree = resources['dfree']
    backend.dtotal = resources['dtotal']
    backend.pinst_cnt = resources['pinst_cnt']
    backend.ctotal = resources['ctotal']
    backend.updated = datetime.now()
    backend.save()


def get_memory_from_instances(backend):
    """Get the memory that is used by instances.

    Get the used memory of a backend. Note: This is different from the real
    memory used, due to KVM's memory de-duplication.

    """
    with pooled_rapi_client(backend) as client:
        instances = client.GetInstances(bulk=True)
    mem = 0
    for i in instances:
        mem += i['oper_ram']
    return mem


##
## Synchronized operations for reconciliation
##


def create_network_synced(network, backend):
    result = _create_network_synced(network, backend)
    if result[0] != 'success':
        return result
    result = connect_network_synced(network, backend)
    return result


def _create_network_synced(network, backend):
    with pooled_rapi_client(backend) as client:
        job = _create_network(network, backend)
        result = wait_for_job(client, job)
    return result


def connect_network_synced(network, backend):
    with pooled_rapi_client(backend) as client:
        for group in client.GetGroups():
            job = client.ConnectNetwork(network.backend_id, group,
                                        network.mode, network.link)
            result = wait_for_job(client, job)
            if result[0] != 'success':
                return result

    return result


def wait_for_job(client, jobid):
    result = client.WaitForJobChange(jobid, ['status', 'opresult'], None, None)
    status = result['job_info'][0]
    while status not in ['success', 'error', 'cancel']:
        result = client.WaitForJobChange(jobid, ['status', 'opresult'],
                                         [result], None)
        status = result['job_info'][0]

    if status == 'success':
        return (status, None)
    else:
        error = result['job_info'][1]
        return (status, error)
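
# Minimal usage sketch:
#
#   with pooled_rapi_client(backend) as client:
#       status, error = wait_for_job(client, job_id)
#   if status != 'success':
#       log.error("Job %s failed: %s", job_id, error)
#
# wait_for_job blocks, polling WaitForJobChange until the job reaches a
# terminal status, so (as the section header above indicates) it suits
# synchronous reconciliation tools rather than the asynchronous dispatcher
# path.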