Revision ad0efdb3: snf-pithos-app/pithos/api/util.py

--- a/snf-pithos-app/pithos/api/util.py
+++ b/snf-pithos-app/pithos/api/util.py
@@ -32,11 +32,9 @@
 # or implied, of GRNET S.A.
 
 from functools import wraps
-from traceback import format_exc
 from datetime import datetime
 from urllib import quote, unquote
 
-from django.conf import settings
 from django.http import HttpResponse
 from django.template.loader import render_to_string
 from django.utils import simplejson as json
@@ -61,7 +59,7 @@
     BACKEND_ACCOUNT_QUOTA, BACKEND_CONTAINER_QUOTA,
     BACKEND_VERSIONING,
     BACKEND_FREE_VERSIONING, BACKEND_POOL_SIZE,
-    COOKIE_NAME, USER_CATALOG_URL,
+    USER_CATALOG_URL,
     RADOS_STORAGE, RADOS_POOL_BLOCKS,
     RADOS_POOL_MAPS, TRANSLATE_UUIDS,
     PUBLIC_URL_SECURITY,
@@ -69,7 +67,7 @@
 from pithos.backends.base import (NotAllowedError, QuotaError, ItemNotExists,
                                   VersionNotExists)
 from snf_django.lib.astakos import (get_user_uuid, get_displayname,
-                                    get_uuids, get_displaynames)
+                                    get_uuids, get_displaynames)
 
 import logging
 import re
@@ -112,11 +110,14 @@
 
 
 def get_header_prefix(request, prefix):
-    """Get all prefix-* request headers in a dict. Reformat keys with format_header_key()."""
+    """Get all prefix-* request headers in a dict.
+    Reformat keys with format_header_key()."""
 
     prefix = 'HTTP_' + prefix.upper().replace('-', '_')
     # TODO: Document or remove '~' replacing.
-    return dict([(format_header_key(k[5:]), v.replace('~', '')) for k, v in request.META.iteritems() if k.startswith(prefix) and len(k) > len(prefix)])
+    return dict([(format_header_key(k[5:]), v.replace('~', ''))
+                 for k, v in request.META.iteritems()
+                 if k.startswith(prefix) and len(k) > len(prefix)])
 
 
 def check_meta_headers(meta):
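Note: the rewrapped return statement above keeps the original semantics. A minimal standalone sketch, using a plain dict in place of Django's request.META; this format_header_key re-implementation is a simplified stand-in, not the Pithos one:

    def format_header_key(k):
        # Simplified stand-in: 'X_OBJECT_META_COLOR' -> 'X-Object-Meta-Color'.
        return '-'.join(part.capitalize() for part in k.split('_'))

    def get_header_prefix(meta, prefix):
        # Collect all prefix-* headers from a META-style dict, as above.
        prefix = 'HTTP_' + prefix.upper().replace('-', '_')
        return dict((format_header_key(k[5:]), v.replace('~', ''))
                    for k, v in meta.items()
                    if k.startswith(prefix) and len(k) > len(prefix))

    meta = {'HTTP_X_OBJECT_META_COLOR': 'blue', 'CONTENT_TYPE': 'text/plain'}
    print(get_header_prefix(meta, 'X-Object-Meta-'))
    # -> {'X-Object-Meta-Color': 'blue'}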
@@ -161,13 +162,16 @@
         v = smart_str(','.join(v), strings_only=True)
         response[k] = v
     for k, v in policy.iteritems():
-        response[smart_str(format_header_key('X-Account-Policy-' + k), strings_only=True)] = smart_str(v, strings_only=True)
+        response[smart_str(format_header_key('X-Account-Policy-' + k),
+                           strings_only=True)] = smart_str(v, strings_only=True)
 
 
 def get_container_headers(request):
     meta = get_header_prefix(request, 'X-Container-Meta-')
     check_meta_headers(meta)
-    policy = dict([(k[19:].lower(), v.replace(' ', '')) for k, v in get_header_prefix(request, 'X-Container-Policy-').iteritems()])
+    policy = dict([(k[19:].lower(), v.replace(' ', '')) for k, v in
+                   get_header_prefix(request,
+                                     'X-Container-Policy-').iteritems()])
     return meta, policy
 
 
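Note: the k[19:] slice in the rewrapped policy line strips the header name prefix; len('X-Container-Policy-') is exactly 19:

    k = 'X-Container-Policy-Quota'
    print(k[19:].lower())  # -> 'quota'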
@@ -189,7 +193,8 @@
         response['X-Container-Until-Timestamp'] = http_date(
             int(meta['until_timestamp']))
     for k, v in policy.iteritems():
-        response[smart_str(format_header_key('X-Container-Policy-' + k), strings_only=True)] = smart_str(v, strings_only=True)
+        response[smart_str(format_header_key('X-Container-Policy-' + k),
+                           strings_only=True)] = smart_str(v, strings_only=True)
 
 
 def get_object_headers(request):
@@ -214,7 +219,8 @@
     response['X-Object-Hash'] = meta['hash']
     response['X-Object-UUID'] = meta['uuid']
     if TRANSLATE_UUIDS:
-        meta['modified_by'] = retrieve_displayname(token, meta['modified_by'])
+        meta['modified_by'] = \
+            retrieve_displayname(token, meta['modified_by'])
     response['X-Object-Modified-By'] = smart_str(
         meta['modified_by'], strings_only=True)
     response['X-Object-Version'] = meta['version']
@@ -249,7 +255,9 @@
                 src_container, prefix=src_name, virtual=False)
             for x in objects:
                 src_meta = request.backend.get_object_meta(request.user_uniq,
-                                                           v_account, src_container, x[0], 'pithos', x[1])
+                                                           v_account,
+                                                           src_container,
+                                                           x[0], 'pithos', x[1])
                 etag += src_meta['checksum']
                 bytes += src_meta['bytes']
         except:
@@ -260,6 +268,7 @@
         md5.update(etag)
         meta['checksum'] = md5.hexdigest().lower()
 
+
 def is_uuid(str):
     if str is None:
         return False
@@ -268,7 +277,8 @@
     except ValueError:
         return False
     else:
-        return True
+        return True
+
 
 ##########################
 # USER CATALOG utilities #
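Note: is_uuid, shown only partially across these hunks, is the usual validate-by-parsing pattern. A runnable sketch (the parameter is renamed here to avoid shadowing the str builtin):

    import uuid

    def is_uuid(value):
        # A string is a UUID iff uuid.UUID() accepts it.
        if value is None:
            return False
        try:
            uuid.UUID(value)
        except ValueError:
            return False
        else:
            return True

    print(is_uuid('6fcef7c5-55cd-4b63-a65e-5e8e4ab19ad0'))  # -> True
    print(is_uuid('bob'))                                   # -> False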
@@ -283,13 +293,15 @@
         return uuid
     return displayname
 
+
 def retrieve_displaynames(token, uuids, return_dict=False, fail_silently=True):
-    catalog = get_displaynames(token, uuids, USER_CATALOG_URL) or {}
+    catalog = get_displaynames(token, uuids, USER_CATALOG_URL) or {}
     missing = list(set(uuids) - set(catalog))
     if missing and not fail_silently:
         raise ItemNotExists('Unknown displaynames: %s' % ', '.join(missing))
     return catalog if return_dict else [catalog.get(i) for i in uuids]
 
+
 def retrieve_uuid(token, displayname):
     if is_uuid(displayname):
         return displayname
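Note: the helper resolves what it can from the catalog and reports the rest. A minimal sketch of the pattern; KeyError below stands in for ItemNotExists:

    def resolve(catalog, keys, return_dict=False, fail_silently=True):
        missing = list(set(keys) - set(catalog))
        if missing and not fail_silently:
            raise KeyError('Unknown keys: %s' % ', '.join(missing))
        return catalog if return_dict else [catalog.get(k) for k in keys]

    print(resolve({'uuid-1': 'alice'}, ['uuid-1', 'uuid-2']))
    # -> ['alice', None]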
@@ -299,6 +311,7 @@
         raise ItemNotExists(displayname)
     return uuid
 
+
 def retrieve_uuids(token, displaynames, return_dict=False, fail_silently=True):
     catalog = get_uuids(token, displaynames, USER_CATALOG_URL) or {}
     missing = list(set(displaynames) - set(catalog))
@@ -306,6 +319,7 @@
         raise ItemNotExists('Unknown uuids: %s' % ', '.join(missing))
     return catalog if return_dict else [catalog.get(i) for i in displaynames]
 
+
 def replace_permissions_displayname(token, holder):
     if holder == '*':
         return holder
@@ -317,6 +331,7 @@
     else:
         return ':'.join([retrieve_uuid(token, account), group])
 
+
 def replace_permissions_uuid(token, holder):
     if holder == '*':
         return holder
@@ -328,7 +343,9 @@
     else:
         return ':'.join([retrieve_displayname(token, account), group])
 
-def update_sharing_meta(request, permissions, v_account, v_container, v_object, meta):
+
+def update_sharing_meta(request, permissions, v_account,
+                        v_container, v_object, meta):
     if permissions is None:
         return
     allowed, perm_path, perms = permissions
@@ -338,11 +355,11 @@
     # replace uuid with displayname
     if TRANSLATE_UUIDS:
         perms['read'] = [replace_permissions_uuid(
-            getattr(request, 'token', None), x) \
-            for x in perms.get('read', [])]
+            getattr(request, 'token', None), x)
+            for x in perms.get('read', [])]
         perms['write'] = [replace_permissions_uuid(
-            getattr(request, 'token', None), x) \
-            for x in perms.get('write', [])]
+            getattr(request, 'token', None), x)
+            for x in perms.get('write', [])]
 
     ret = []
 
@@ -374,13 +391,15 @@
     if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE')
     if if_modified_since is not None:
         if_modified_since = parse_http_date_safe(if_modified_since)
-    if if_modified_since is not None and int(meta['modified']) <= if_modified_since:
+    if (if_modified_since is not None
+            and int(meta['modified']) <= if_modified_since):
         raise faults.NotModified('Resource has not been modified')
 
     if_unmodified_since = request.META.get('HTTP_IF_UNMODIFIED_SINCE')
     if if_unmodified_since is not None:
         if_unmodified_since = parse_http_date_safe(if_unmodified_since)
-    if if_unmodified_since is not None and int(meta['modified']) > if_unmodified_since:
+    if (if_unmodified_since is not None
+            and int(meta['modified']) > if_unmodified_since):
         raise faults.PreconditionFailed('Resource has been modified')
 
 
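Note: the rewrapped conditionals implement the standard HTTP date preconditions: 304 when the resource is unchanged since If-Modified-Since, 412 when it changed after If-Unmodified-Since. A sketch using the stdlib date parser in place of Django's parse_http_date_safe:

    from email.utils import mktime_tz, parsedate_tz

    def not_modified_since(header_value, last_modified_ts):
        parsed = parsedate_tz(header_value)
        if parsed is None:  # unparsable header: the precondition is ignored
            return False
        return int(last_modified_ts) <= mktime_tz(parsed)

    print(not_modified_since('Sun, 06 Nov 1994 08:49:37 GMT', 0))  # -> True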
@@ -395,18 +414,21 @@
     if if_match is not None:
         if etag is None:
             raise faults.PreconditionFailed('Resource does not exist')
-        if if_match != '*' and etag not in [x.lower() for x in parse_etags(if_match)]:
+        if (if_match != '*'
+                and etag not in [x.lower() for x in parse_etags(if_match)]):
             raise faults.PreconditionFailed('Resource ETag does not match')
 
     if_none_match = request.META.get('HTTP_IF_NONE_MATCH')
     if if_none_match is not None:
         # TODO: If this passes, must ignore If-Modified-Since header.
         if etag is not None:
-            if if_none_match == '*' or etag in [x.lower() for x in parse_etags(if_none_match)]:
+            if (if_none_match == '*'
+                    or etag in [x.lower() for x in parse_etags(if_none_match)]):
                 # TODO: Continue if an If-Modified-Since header is present.
                 if request.method in ('HEAD', 'GET'):
                     raise faults.NotModified('Resource ETag matches')
-                raise faults.PreconditionFailed('Resource exists or ETag matches')
+                raise faults.PreconditionFailed(
+                    'Resource exists or ETag matches')
 
 
 def split_container_object_string(s):
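Note: the ETag checks follow RFC 2616 semantics: If-Match fails unless the current ETag is listed (or the header is '*'), while If-None-Match "hits" when it is listed. A sketch, with a simplified stand-in for Django's parse_etags:

    def parse_etags(header):
        # Simplified stand-in for Django's parser.
        return [t.strip().strip('"').lower() for t in header.split(',')]

    def if_match_fails(if_match, etag):
        return if_match != '*' and etag not in parse_etags(if_match)

    def if_none_match_hits(if_none_match, etag):
        return if_none_match == '*' or etag in parse_etags(if_none_match)

    print(if_match_fails('"abc", "def"', 'abc'))  # -> False ('abc' is listed)
    print(if_none_match_hits('"abc"', 'abc'))     # -> True (matching tag)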
@@ -419,7 +441,9 @@
     return s[:pos], s[(pos + 1):]
 
 
-def copy_or_move_object(request, src_account, src_container, src_name, dest_account, dest_container, dest_name, move=False, delimiter=None):
+def copy_or_move_object(request, src_account, src_container, src_name,
+                        dest_account, dest_container, dest_name,
+                        move=False, delimiter=None):
     """Copy or move an object."""
 
     if 'ignore_content_type' in request.GET and 'CONTENT_TYPE' in request.META:
@@ -436,7 +460,8 @@
         version_id = request.backend.copy_object(
             request.user_uniq, src_account, src_container, src_name,
             dest_account, dest_container, dest_name,
-            content_type, 'pithos', meta, False, permissions, src_version, delimiter)
+            content_type, 'pithos', meta, False, permissions,
+            src_version, delimiter)
     except NotAllowedError:
         raise faults.Forbidden('Not allowed')
     except (ItemNotExists, VersionNotExists):
@@ -447,7 +472,9 @@
         raise faults.RequestEntityTooLarge('Quota error: %s' % e)
     if public is not None:
         try:
-            request.backend.update_object_public(request.user_uniq, dest_account, dest_container, dest_name, public)
+            request.backend.update_object_public(
+                request.user_uniq, dest_account,
+                dest_container, dest_name, public)
         except NotAllowedError:
             raise faults.Forbidden('Not allowed')
         except ItemNotExists:
@@ -598,11 +625,11 @@
     if TRANSLATE_UUIDS:
         try:
             ret['read'] = [replace_permissions_displayname(
-                getattr(request, 'token', None), x) \
-                for x in ret.get('read', [])]
+                getattr(request, 'token', None), x)
+                for x in ret.get('read', [])]
             ret['write'] = [replace_permissions_displayname(
-                getattr(request, 'token', None), x) \
-                for x in ret.get('write', [])]
+                getattr(request, 'token', None), x)
+                for x in ret.get('write', [])]
         except ItemNotExists, e:
             raise faults.BadRequest(
                 'Bad X-Object-Sharing header value: unknown account: %s' % e)
@@ -651,7 +678,7 @@
 
 
 def socket_read_iterator(request, length=0, blocksize=4096):
-    """Return a maximum of blocksize data read from the socket in each iteration.
+    """Return a maximum of blocksize data read from the socket in each iteration
 
     Read up to 'length'. If 'length' is negative, will attempt a chunked read.
     The maximum ammount of data read is controlled by MAX_UPLOAD_SIZE.
@@ -660,7 +687,8 @@
     sock = raw_input_socket(request)
     if length < 0:  # Chunked transfers
         # Small version (server does the dechunking).
-        if request.environ.get('mod_wsgi.input_chunked', None) or request.META['SERVER_SOFTWARE'].startswith('gunicorn'):
+        if (request.environ.get('mod_wsgi.input_chunked', None)
+                or request.META['SERVER_SOFTWARE'].startswith('gunicorn')):
            while length < MAX_UPLOAD_SIZE:
                 data = sock.read(blocksize)
                 if data == '':
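Note: the loop caps a dechunked upload at MAX_UPLOAD_SIZE while reading at most blocksize bytes per iteration. A self-contained sketch, with a file-like object in place of the request socket:

    import io

    MAX_UPLOAD_SIZE = 16  # tiny cap, for the demo only

    def read_capped(sock, blocksize=4):
        length = 0
        while length < MAX_UPLOAD_SIZE:
            data = sock.read(blocksize)
            if data == b'':  # EOF
                break
            length += len(data)
            yield data

    sock = io.BytesIO(b'0123456789')
    print(b''.join(read_capped(sock)))  # -> b'0123456789'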
@@ -730,7 +758,8 @@
             self.md5.update(block)
         self.data = self.data[length:]
 
-    def new_file(self, field_name, file_name, content_type, content_length, charset=None):
+    def new_file(self, field_name, file_name, content_type,
+                 content_length, charset=None):
         self.md5 = hashlib.md5()
         self.data = ''
         self.file = UploadedFile(
755 | 784 |
class ObjectWrapper(object): |
756 | 785 |
"""Return the object's data block-per-block in each iteration. |
757 | 786 |
|
758 |
Read from the object using the offset and length provided in each entry of the range list. |
|
787 |
Read from the object using the offset and length provided |
|
788 |
in each entry of the range list. |
|
759 | 789 |
""" |
760 | 790 |
|
761 | 791 |
def __init__(self, backend, ranges, sizes, hashmaps, boundary): |
... | ... | |
788 | 818 |
|
789 | 819 |
# Get the block for the current position. |
790 | 820 |
self.block_index = int(self.offset / self.backend.block_size) |
791 |
if self.block_hash != self.hashmaps[self.file_index][self.block_index]: |
|
821 |
if self.block_hash != \ |
|
822 |
self.hashmaps[self.file_index][self.block_index]: |
|
792 | 823 |
self.block_hash = self.hashmaps[ |
793 | 824 |
self.file_index][self.block_index] |
794 | 825 |
try: |
... | ... | |
858 | 889 |
offset < 0 or offset >= size or |
859 | 890 |
offset + length > size] |
860 | 891 |
if len(check) > 0: |
861 |
raise faults.RangeNotSatisfiable('Requested range exceeds object limits') |
|
892 |
raise faults.RangeNotSatisfiable( |
|
893 |
'Requested range exceeds object limits') |
|
862 | 894 |
ret = 206 |
863 | 895 |
if_range = request.META.get('HTTP_IF_RANGE') |
864 | 896 |
if if_range: |
... | ... | |
880 | 912 |
wrapper = ObjectWrapper(request.backend, ranges, sizes, hashmaps, boundary) |
881 | 913 |
response = HttpResponse(wrapper, status=ret) |
882 | 914 |
put_object_headers( |
883 |
response, meta, restricted=public, token=getattr(request, 'token', None)) |
|
915 |
response, meta, restricted=public, |
|
916 |
token=getattr(request, 'token', None)) |
|
884 | 917 |
if ret == 206: |
885 | 918 |
if len(ranges) == 1: |
886 | 919 |
offset, length = ranges[0] |
... | ... | |
911 | 944 |
def hashmap_md5(backend, hashmap, size): |
912 | 945 |
"""Produce the MD5 sum from the data in the hashmap.""" |
913 | 946 |
|
914 |
# TODO: Search backend for the MD5 of another object with the same hashmap and size... |
|
947 |
# TODO: Search backend for the MD5 of another object |
|
948 |
# with the same hashmap and size... |
|
915 | 949 |
md5 = hashlib.md5() |
916 | 950 |
bs = backend.block_size |
917 | 951 |
for bi, hash in enumerate(hashmap): |
... | ... | |
934 | 968 |
from pithos.backends.util import PithosBackendPool |
935 | 969 |
|
936 | 970 |
if RADOS_STORAGE: |
937 |
BLOCK_PARAMS = { 'mappool': RADOS_POOL_MAPS, |
|
938 |
'blockpool': RADOS_POOL_BLOCKS, |
|
939 |
} |
|
971 |
BLOCK_PARAMS = {'mappool': RADOS_POOL_MAPS, |
|
972 |
'blockpool': RADOS_POOL_BLOCKS, } |
|
940 | 973 |
else: |
941 |
BLOCK_PARAMS = { 'mappool': None, |
|
942 |
'blockpool': None, |
|
943 |
} |
|
974 |
BLOCK_PARAMS = {'mappool': None, |
|
975 |
'blockpool': None, } |
|
944 | 976 |
|
945 | 977 |
|
946 | 978 |
_pithos_backend_pool = PithosBackendPool( |
947 |
size=BACKEND_POOL_SIZE,
|
|
948 |
db_module=BACKEND_DB_MODULE,
|
|
949 |
db_connection=BACKEND_DB_CONNECTION,
|
|
950 |
block_module=BACKEND_BLOCK_MODULE,
|
|
951 |
block_path=BACKEND_BLOCK_PATH,
|
|
952 |
block_umask=BACKEND_BLOCK_UMASK,
|
|
953 |
queue_module=BACKEND_QUEUE_MODULE,
|
|
954 |
queue_hosts=BACKEND_QUEUE_HOSTS,
|
|
955 |
queue_exchange=BACKEND_QUEUE_EXCHANGE,
|
|
956 |
quotaholder_enabled=USE_QUOTAHOLDER,
|
|
957 |
quotaholder_url=QUOTAHOLDER_URL,
|
|
958 |
quotaholder_token=QUOTAHOLDER_TOKEN,
|
|
959 |
quotaholder_client_poolsize=QUOTAHOLDER_POOLSIZE,
|
|
960 |
free_versioning=BACKEND_FREE_VERSIONING,
|
|
961 |
block_params=BLOCK_PARAMS,
|
|
962 |
public_url_security=PUBLIC_URL_SECURITY,
|
|
963 |
public_url_alphabet=PUBLIC_URL_ALPHABET,
|
|
964 |
account_quota_policy=BACKEND_ACCOUNT_QUOTA,
|
|
965 |
container_quota_policy=BACKEND_CONTAINER_QUOTA,
|
|
966 |
container_versioning_policy=BACKEND_VERSIONING)
|
|
979 |
size=BACKEND_POOL_SIZE, |
|
980 |
db_module=BACKEND_DB_MODULE, |
|
981 |
db_connection=BACKEND_DB_CONNECTION, |
|
982 |
block_module=BACKEND_BLOCK_MODULE, |
|
983 |
block_path=BACKEND_BLOCK_PATH, |
|
984 |
block_umask=BACKEND_BLOCK_UMASK, |
|
985 |
queue_module=BACKEND_QUEUE_MODULE, |
|
986 |
queue_hosts=BACKEND_QUEUE_HOSTS, |
|
987 |
queue_exchange=BACKEND_QUEUE_EXCHANGE, |
|
988 |
quotaholder_enabled=USE_QUOTAHOLDER, |
|
989 |
quotaholder_url=QUOTAHOLDER_URL, |
|
990 |
quotaholder_token=QUOTAHOLDER_TOKEN, |
|
991 |
quotaholder_client_poolsize=QUOTAHOLDER_POOLSIZE, |
|
992 |
free_versioning=BACKEND_FREE_VERSIONING, |
|
993 |
block_params=BLOCK_PARAMS, |
|
994 |
public_url_security=PUBLIC_URL_SECURITY, |
|
995 |
public_url_alphabet=PUBLIC_URL_ALPHABET, |
|
996 |
account_quota_policy=BACKEND_ACCOUNT_QUOTA, |
|
997 |
container_quota_policy=BACKEND_CONTAINER_QUOTA, |
|
998 |
container_versioning_policy=BACKEND_VERSIONING) |
|
967 | 999 |
|
968 | 1000 |
|
969 | 1001 |
def get_backend(): |
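Note: the pool construction above implies a borrow/return discipline for request handling. The pool_get/pool_put names below follow synnefo's ObjectPool convention and are an assumption here, not an API confirmed by this diff:

    class DummyPool(object):
        # Stand-in pool, only for illustrating the discipline.
        def __init__(self, items):
            self._items = list(items)

        def pool_get(self):
            return self._items.pop()

        def pool_put(self, item):
            self._items.append(item)

    def with_backend(pool, func):
        backend = pool.pool_get()      # borrow a backend
        try:
            return func(backend)
        finally:
            pool.pool_put(backend)     # always return it, even on error

    pool = DummyPool(['backend-1'])
    print(with_backend(pool, lambda b: 'using %s' % b))  # -> using backend-1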