# snf-cyclades-app/synnefo/api/util.py @ 34c03a51
# Copyright 2011-2012 GRNET S.A. All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
#   1. Redistributions of source code must retain the above
#      copyright notice, this list of conditions and the following
#      disclaimer.
#
#   2. Redistributions in binary form must reproduce the above
#      copyright notice, this list of conditions and the following
#      disclaimer in the documentation and/or other materials
#      provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
# or implied, of GRNET S.A.

import datetime
import ipaddr

from base64 import b64encode, b64decode
from datetime import timedelta, tzinfo
from functools import wraps
from hashlib import sha256
from logging import getLogger
from random import choice
from string import digits, lowercase, uppercase
from time import time
from traceback import format_exc
from wsgiref.handlers import format_date_time

import dateutil.parser

from Crypto.Cipher import AES

from django.conf import settings
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.utils import simplejson as json
from django.utils.cache import add_never_cache_headers
from django.db.models import Q

from snf_django.lib.api import faults
from synnefo.db.models import (Flavor, VirtualMachine, VirtualMachineMetadata,
                               Network, BackendNetwork, NetworkInterface,
                               BridgePoolTable, MacPrefixPoolTable, Backend)
from synnefo.db.pools import EmptyPool

from snf_django.lib.astakos import get_user
from synnefo.plankton.utils import image_backend
from synnefo.settings import MAX_CIDR_BLOCK

from synnefo.cyclades_settings import cyclades_services, BASE_HOST
from synnefo.lib.services import get_service_path
from synnefo.lib import join_urls

COMPUTE_URL = \
    join_urls(BASE_HOST,
              get_service_path(cyclades_services, "compute", version="v2.0"))
SERVERS_URL = join_urls(COMPUTE_URL, "servers/")
NETWORKS_URL = join_urls(COMPUTE_URL, "networks/")
FLAVORS_URL = join_urls(COMPUTE_URL, "flavors/")
IMAGES_URL = join_urls(COMPUTE_URL, "images/")
PLANKTON_URL = \
    join_urls(BASE_HOST,
              get_service_path(cyclades_services, "image", version="v1.0"))
IMAGES_PLANKTON_URL = join_urls(PLANKTON_URL, "images/")
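
# Illustrative note (added, not in the original source): the constants above
# are of the form <BASE_HOST>/<service path>/<resource>/, e.g. SERVERS_URL is
# "<BASE_HOST>/<compute v2.0 path>/servers/" and IMAGES_PLANKTON_URL is
# "<BASE_HOST>/<image v1.0 path>/images/". The actual host and service paths
# come from the deployment's BASE_HOST setting and cyclades_services registry.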


log = getLogger('synnefo.api')


def random_password():
    """Generate a random password.

    We generate a Windows-compliant password: it must contain at least
    one character from each of the groups: upper case, lower case, digits.
    """

    pool = lowercase + uppercase + digits
    lowerset = set(lowercase)
    upperset = set(uppercase)
    digitset = set(digits)
    length = 10

    password = ''.join(choice(pool) for i in range(length - 2))

    # Make sure the password is compliant
    chars = set(password)
    if not chars & lowerset:
        password += choice(lowercase)
    if not chars & upperset:
        password += choice(uppercase)
    if not chars & digitset:
        password += choice(digits)

    # Pad if necessary to reach required length
    password += ''.join(choice(pool) for i in range(length - len(password)))

    return password
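
# Illustrative usage (the value shown is hypothetical):
#   >>> random_password()
#   'aB3kZt0qWx'
# The result is always a 10-character string drawn from lowercase, uppercase
# and digit characters, containing at least one character from each group.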


def zeropad(s):
    """Add zeros at the end of a string in order to make its length
    a multiple of 16."""

    npad = 16 - len(s) % 16
    return s + '\x00' * npad


def encrypt(plaintext):
    # Make sure key is 32 bytes long
    key = sha256(settings.SECRET_KEY).digest()

    aes = AES.new(key)
    enc = aes.encrypt(zeropad(plaintext))
    return b64encode(enc)
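
# Note (added, not in the original source): AES.new(key) in PyCrypto defaults
# to ECB mode, and zeropad() supplies NUL-byte padding. A matching decrypt
# helper is not defined in this module; a minimal sketch could look like:
#
#   def decrypt(ciphertext):
#       key = sha256(settings.SECRET_KEY).digest()
#       aes = AES.new(key)
#       return aes.decrypt(b64decode(ciphertext)).rstrip('\x00')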


def get_vm(server_id, user_id, for_update=False, non_deleted=False,
           non_suspended=False):
    """Find a VirtualMachine instance based on ID and owner."""

    try:
        server_id = int(server_id)
        servers = VirtualMachine.objects
        if for_update:
            servers = servers.select_for_update()
        vm = servers.get(id=server_id, userid=user_id)
        if non_deleted and vm.deleted:
            raise faults.BadRequest("Server has been deleted.")
        if non_suspended and vm.suspended:
            raise faults.Forbidden("Administratively Suspended VM")
        return vm
    except ValueError:
        raise faults.BadRequest('Invalid server ID.')
    except VirtualMachine.DoesNotExist:
        raise faults.ItemNotFound('Server not found.')


def get_vm_meta(vm, key):
    """Return a VirtualMachineMetadata instance or raise ItemNotFound."""

    try:
        return VirtualMachineMetadata.objects.get(meta_key=key, vm=vm)
    except VirtualMachineMetadata.DoesNotExist:
        raise faults.ItemNotFound('Metadata key not found.')


def get_image(image_id, user_id):
    """Return an Image instance or raise ItemNotFound."""

    with image_backend(user_id) as backend:
        return backend.get_image(image_id)


def get_image_dict(image_id, user_id):
    image = {}
    img = get_image(image_id, user_id)
    properties = img.get('properties', {})
    image["id"] = img["id"]
    image["name"] = img["name"]
    image['backend_id'] = img['location']
    image['format'] = img['disk_format']
    image['metadata'] = dict((key.upper(), val)
                             for key, val in properties.items())
    image['checksum'] = img['checksum']

    return image
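
# Illustrative result shape (all values are hypothetical):
#   {'id': '6404619d-...', 'name': 'Debian Base', 'backend_id': 'pithos://...',
#    'format': 'diskdump', 'metadata': {'OSFAMILY': 'linux', ...},
#    'checksum': '3cb03556...'}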


def get_flavor(flavor_id, include_deleted=False):
    """Return a Flavor instance or raise ItemNotFound."""

    try:
        flavor_id = int(flavor_id)
        if include_deleted:
            return Flavor.objects.get(id=flavor_id)
        else:
            return Flavor.objects.get(id=flavor_id, deleted=include_deleted)
    except (ValueError, Flavor.DoesNotExist):
        raise faults.ItemNotFound('Flavor not found.')


def get_flavor_provider(flavor):
    """Extract provider from disk template.

    Provider for `ext` disk_template is encoded in the disk template
    name, which is formed `ext_<provider_name>`. Provider is None
    for all other disk templates.

    """
    disk_template = flavor.disk_template
    provider = None
    if disk_template.startswith("ext"):
        disk_template, provider = disk_template.split("_", 1)
    return disk_template, provider
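
# Illustrative examples (provider names are hypothetical):
#   disk_template "ext_archipelago" -> ("ext", "archipelago")
#   disk_template "drbd"            -> ("drbd", None)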


def get_network(network_id, user_id, for_update=False):
    """Return a Network instance or raise ItemNotFound."""

    try:
        network_id = int(network_id)
        objects = Network.objects
        if for_update:
            objects = objects.select_for_update()
        return objects.get(Q(userid=user_id) | Q(public=True), id=network_id)
    except (ValueError, Network.DoesNotExist):
        raise faults.ItemNotFound('Network not found.')


def validate_network_params(subnet, gateway=None, subnet6=None, gateway6=None):
    try:
        # Use strict option to not allow subnets with host bits set
        network = ipaddr.IPv4Network(subnet, strict=True)
    except ValueError:
        raise faults.BadRequest("Invalid network IPv4 subnet")

    # Check that network size is allowed!
    if not validate_network_size(network.prefixlen):
        raise faults.OverLimit(message="Unsupported network size",
                               details="Network mask must be in range (%s, 29]" %
                               MAX_CIDR_BLOCK)

    # Check that gateway belongs to network
    if gateway:
        try:
            gateway = ipaddr.IPv4Address(gateway)
        except ValueError:
            raise faults.BadRequest("Invalid network IPv4 gateway")
        if gateway not in network:
            raise faults.BadRequest("Invalid network IPv4 gateway")

    if subnet6:
        try:
            # Use strict option to not allow subnets with host bits set
            network6 = ipaddr.IPv6Network(subnet6, strict=True)
        except ValueError:
            raise faults.BadRequest("Invalid network IPv6 subnet")
        if gateway6:
            try:
                gateway6 = ipaddr.IPv6Address(gateway6)
            except ValueError:
                raise faults.BadRequest("Invalid network IPv6 gateway")
            if gateway6 not in network6:
                raise faults.BadRequest("Invalid network IPv6 gateway")


def validate_network_size(cidr_block):
    """Return True if network size is allowed."""
    return cidr_block <= 29 and cidr_block > MAX_CIDR_BLOCK
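
# Worked example (assuming, hypothetically, MAX_CIDR_BLOCK = 22):
#   validate_network_size(24) -> True   (22 < 24 <= 29)
#   validate_network_size(16) -> False  (network larger than the allowed block)
#   validate_network_size(30) -> False  (mask longer than /29)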


def allocate_public_address(backend):
    """Allocate a public IP for a vm."""
    for network in backend_public_networks(backend):
        try:
            address = get_network_free_address(network)
        except faults.OverLimit:
            pass
        else:
            return (network, address)
    return (None, None)


def get_public_ip(backend):
    """Reserve an IP from a public network.

    This method should run inside a transaction.

    """

    # Guarantee exclusive access to the backend, because accessing the IP
    # pools of the backend networks may result in a deadlock with the backend
    # allocator, which also checks that backend networks have a free IP.
    backend = Backend.objects.select_for_update().get(id=backend.id)

    address = None
    if settings.PUBLIC_USE_POOL:
        (network, address) = allocate_public_address(backend)
    else:
        for net in list(backend_public_networks(backend)):
            pool = net.get_pool()
            if not pool.empty():
                address = 'pool'
                network = net
                break
    if address is None:
        log.error("Public networks of backend %s are full", backend)
        raise faults.OverLimit("Can not allocate IP for new machine."
                               " Public networks are full.")
    return (network, address)


def backend_public_networks(backend):
    """Return available public networks of the backend.

    Iterator for non-deleted public networks that are available
    to the specified backend.

    """
    for network in Network.objects.filter(public=True, deleted=False,
                                          drained=False):
        if BackendNetwork.objects.filter(network=network,
                                         backend=backend).exists():
            yield network


def get_network_free_address(network):
    """Reserve an IP address from the IP Pool of the network."""

    pool = network.get_pool()
    try:
        address = pool.get()
    except EmptyPool:
        raise faults.OverLimit("Network %s is full." % network.backend_id)
    pool.save()
    return address


def get_nic(machine, network):
    try:
        return NetworkInterface.objects.get(machine=machine, network=network)
    except NetworkInterface.DoesNotExist:
        raise faults.ItemNotFound('Server not connected to this network.')


def get_nic_from_index(vm, nic_index):
    """Return the nic_index-th NIC of a VM.

    Error Response Codes: itemNotFound (404), badMediaType (415)
    """
    matching_nics = vm.nics.filter(index=nic_index)
    matching_nics_len = len(matching_nics)
    if matching_nics_len < 1:
        raise faults.ItemNotFound('NIC not found on VM')
    elif matching_nics_len > 1:
        raise faults.BadMediaType('NIC index conflict on VM')
    nic = matching_nics[0]
    return nic


def render_metadata(request, metadata, use_values=False, status=200):
    if request.serialization == 'xml':
        data = render_to_string('metadata.xml', {'metadata': metadata})
    else:
        if use_values:
            d = {'metadata': {'values': metadata}}
        else:
            d = {'metadata': metadata}
        data = json.dumps(d)
    return HttpResponse(data, status=status)
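
# Illustrative JSON output shapes (metadata values are hypothetical):
#   use_values=False: {"metadata": {"OS": "linux"}}
#   use_values=True:  {"metadata": {"values": {"OS": "linux"}}}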


def render_meta(request, meta, status=200):
    if request.serialization == 'xml':
        # `meta` is expected to be a single-item dict ({key: value})
        key, val = meta.items()[0]
        data = render_to_string('meta.xml', dict(key=key, val=val))
    else:
        data = json.dumps(dict(meta=meta))
    return HttpResponse(data, status=status)


def construct_nic_id(nic):
    return "-".join(["nic", unicode(nic.machine.id), unicode(nic.index)])


def verify_personality(personality):
    """Verify that a list of personalities is well formed"""
    if len(personality) > settings.MAX_PERSONALITY:
        raise faults.OverLimit("Maximum number of personalities"
                               " exceeded")
    for p in personality:
        # Verify that personalities are well-formed
        try:
            assert isinstance(p, dict)
            keys = set(p.keys())
            allowed = set(['contents', 'group', 'mode', 'owner', 'path'])
            assert keys.issubset(allowed)
            contents = p['contents']
            if len(contents) > settings.MAX_PERSONALITY_SIZE:
                # No need to decode if contents already exceed limit
                raise faults.OverLimit("Maximum size of personality exceeded")
            if len(b64decode(contents)) > settings.MAX_PERSONALITY_SIZE:
                raise faults.OverLimit("Maximum size of personality exceeded")
        except AssertionError:
            raise faults.BadRequest("Malformed personality in request")


def values_from_flavor(flavor):
    """Get Ganeti connectivity info from flavor type.

    If link or mac_prefix equals "pool", then the resources
    are allocated from the corresponding Pools.

    """
    try:
        flavor = Network.FLAVORS[flavor]
    except KeyError:
        raise faults.BadRequest("Unknown network flavor")

    mode = flavor.get("mode")

    link = flavor.get("link")
    if link == "pool":
        link = allocate_resource("bridge")

    mac_prefix = flavor.get("mac_prefix")
    if mac_prefix == "pool":
        mac_prefix = allocate_resource("mac_prefix")

    tags = flavor.get("tags")

    return mode, link, mac_prefix, tags
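
# Illustrative example (the flavor entry below is hypothetical): for a
# Network.FLAVORS entry such as
#   {"mode": "bridged", "link": "pool", "mac_prefix": "aa:00:0", "tags": None}
# the call returns ("bridged", <bridge drawn from BridgePoolTable>,
# "aa:00:0", None).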


def allocate_resource(res_type):
    table = get_pool_table(res_type)
    pool = table.get_pool()
    value = pool.get()
    pool.save()
    return value


def release_resource(res_type, value):
    table = get_pool_table(res_type)
    pool = table.get_pool()
    pool.put(value)
    pool.save()


def get_pool_table(res_type):
    if res_type == "bridge":
        return BridgePoolTable
    elif res_type == "mac_prefix":
        return MacPrefixPoolTable
    else:
        raise Exception("Unknown resource type")
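
# Illustrative usage of the pool helpers above (not from the original source):
#   bridge = allocate_resource("bridge")   # draw a bridge from the pool
#   release_resource("bridge", bridge)     # return it when no longer needed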


def get_existing_users():
    """
    Retrieve user ids stored in cyclades user agnostic models.
    """
    # also check PublicKeyPair, since a user may exist with no servers/networks
    from synnefo.userdata.models import PublicKeyPair
    from synnefo.db.models import VirtualMachine, Network

    keypairusernames = PublicKeyPair.objects.filter().values_list('user',
                                                                  flat=True)
    serverusernames = VirtualMachine.objects.filter().values_list('userid',
                                                                  flat=True)
    networkusernames = Network.objects.filter().values_list('userid',
                                                            flat=True)

    return set(list(keypairusernames) + list(serverusernames) +
               list(networkusernames))


def vm_to_links(vm_id):
    href = join_urls(SERVERS_URL, str(vm_id))
    return [{"rel": rel, "href": href} for rel in ("self", "bookmark")]


def network_to_links(network_id):
    href = join_urls(NETWORKS_URL, str(network_id))
    return [{"rel": rel, "href": href} for rel in ("self", "bookmark")]


def flavor_to_links(flavor_id):
    href = join_urls(FLAVORS_URL, str(flavor_id))
    return [{"rel": rel, "href": href} for rel in ("self", "bookmark")]


def image_to_links(image_id):
    href = join_urls(IMAGES_URL, str(image_id))
    links = [{"rel": rel, "href": href} for rel in ("self", "bookmark")]
    links.append({"rel": "alternate",
                  "href": join_urls(IMAGES_PLANKTON_URL, str(image_id))})
    return links


def start_action(vm, action, jobId):
    vm.action = action
    vm.backendjobid = jobId
    vm.backendopcode = None
    vm.backendjobstatus = None
    vm.backendlogmsg = None
    vm.save()