# snf-pithos-app: pithos/api/util.py @ 48c5b124
# Copyright 2011-2012 GRNET S.A. All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
#   1. Redistributions of source code must retain the above
#      copyright notice, this list of conditions and the following
#      disclaimer.
#
#   2. Redistributions in binary form must reproduce the above
#      copyright notice, this list of conditions and the following
#      disclaimer in the documentation and/or other materials
#      provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
# or implied, of GRNET S.A.

from functools import wraps
from datetime import datetime
from urllib import quote, unquote

from django.http import (HttpResponse, HttpResponseRedirect, Http404,
                         HttpResponseForbidden)
from django.template.loader import render_to_string
from django.utils import simplejson as json
from django.utils.http import http_date, parse_etags
from django.utils.encoding import smart_unicode, smart_str
from django.core.files.uploadhandler import FileUploadHandler
from django.core.files.uploadedfile import UploadedFile
from django.core.urlresolvers import reverse

from snf_django.lib.api.parsedate import parse_http_date_safe, parse_http_date
from snf_django.lib import api
from snf_django.lib.api import faults, utils

from pithos.api.settings import (BACKEND_DB_MODULE, BACKEND_DB_CONNECTION,
                                 BACKEND_BLOCK_MODULE, BACKEND_BLOCK_PATH,
                                 BACKEND_BLOCK_UMASK,
                                 BACKEND_QUEUE_MODULE, BACKEND_QUEUE_HOSTS,
                                 BACKEND_QUEUE_EXCHANGE,
                                 ASTAKOSCLIENT_POOLSIZE,
                                 SERVICE_TOKEN,
                                 ASTAKOS_BASE_URL,
                                 BACKEND_ACCOUNT_QUOTA,
                                 BACKEND_CONTAINER_QUOTA,
                                 BACKEND_VERSIONING, BACKEND_FREE_VERSIONING,
                                 BACKEND_POOL_ENABLED, BACKEND_POOL_SIZE,
                                 BACKEND_BLOCK_SIZE, BACKEND_HASH_ALGORITHM,
                                 RADOS_STORAGE, RADOS_POOL_BLOCKS,
                                 RADOS_POOL_MAPS, TRANSLATE_UUIDS,
                                 PUBLIC_URL_SECURITY, PUBLIC_URL_ALPHABET,
                                 COOKIE_NAME, BASE_HOST, UPDATE_MD5, LOGIN_URL)
from pithos.api.resources import resources
from pithos.backends import connect_backend
from pithos.backends.base import (NotAllowedError, QuotaError, ItemNotExists,
                                  VersionNotExists)

from synnefo.lib import join_urls

from astakosclient import AstakosClient
from astakosclient.errors import NoUserName, NoUUID

import logging
import re
import hashlib
import uuid
import decimal

logger = logging.getLogger(__name__)


def json_encode_decimal(obj):
    if isinstance(obj, decimal.Decimal):
        return str(obj)
    raise TypeError(repr(obj) + " is not JSON serializable")
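
# Illustrative usage (a sketch, not part of the original module): this helper
# fits json.dumps's 'default' hook, so Decimal values serialize as strings:
#   json.dumps({'quota': decimal.Decimal('10.5')},
#              default=json_encode_decimal)   # -> '{"quota": "10.5"}'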


def rename_meta_key(d, old, new):
    if old not in d:
        return
    d[new] = d[old]
    del(d[old])


def printable_header_dict(d):
    """Format a meta dictionary for printing out json/xml.

    Convert all keys to lower case and replace dashes with underscores.
    Format 'last_modified' timestamp.
    """

    if 'last_modified' in d and d['last_modified']:
        d['last_modified'] = utils.isoformat(
            datetime.fromtimestamp(d['last_modified']))
    return dict([(k.lower().replace('-', '_'), v) for k, v in d.iteritems()])
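
# For example (illustrative input):
#   printable_header_dict({'Content-Type': 'text/plain', 'Bytes-Used': 1})
#   -> {'content_type': 'text/plain', 'bytes_used': 1}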


def format_header_key(k):
    """Convert underscores to dashes and capitalize intra-dash strings."""
    return '-'.join([x.capitalize() for x in k.replace('_', '-').split('-')])
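
# For example:
#   format_header_key('x_account_group_admins') -> 'X-Account-Group-Admins'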


def get_header_prefix(request, prefix):
    """Get all prefix-* request headers in a dict.
    Reformat keys with format_header_key()."""

    prefix = 'HTTP_' + prefix.upper().replace('-', '_')
    # TODO: Document or remove '~' replacing.
    return dict([(format_header_key(k[5:]), v.replace('~', ''))
                 for k, v in request.META.iteritems()
                 if k.startswith(prefix) and len(k) > len(prefix)])
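
# Illustrative example (hypothetical request metadata): with
#   request.META == {'HTTP_X_ACCOUNT_META_COLOR': 'blue'}
# get_header_prefix(request, 'X-Account-Meta-') returns
#   {'X-Account-Meta-Color': 'blue'}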


def check_meta_headers(meta):
    if len(meta) > 90:
        raise faults.BadRequest('Too many headers.')
    for k, v in meta.iteritems():
        if len(k) > 128:
            raise faults.BadRequest('Header name too large.')
        if len(v) > 256:
            raise faults.BadRequest('Header value too large.')


def get_account_headers(request):
    meta = get_header_prefix(request, 'X-Account-Meta-')
    check_meta_headers(meta)
    groups = {}
    for k, v in get_header_prefix(request, 'X-Account-Group-').iteritems():
        n = k[16:].lower()
        if '-' in n or '_' in n:
            raise faults.BadRequest('Bad characters in group name')
        groups[n] = v.replace(' ', '').split(',')
        while '' in groups[n]:
            groups[n].remove('')
    return meta, groups


def put_account_headers(response, meta, groups, policy):
    if 'count' in meta:
        response['X-Account-Container-Count'] = meta['count']
    if 'bytes' in meta:
        response['X-Account-Bytes-Used'] = meta['bytes']
    response['Last-Modified'] = http_date(int(meta['modified']))
    for k in [x for x in meta.keys() if x.startswith('X-Account-Meta-')]:
        response[smart_str(
            k, strings_only=True)] = smart_str(meta[k], strings_only=True)
    if 'until_timestamp' in meta:
        response['X-Account-Until-Timestamp'] = http_date(
            int(meta['until_timestamp']))
    for k, v in groups.iteritems():
        k = smart_str(k, strings_only=True)
        k = format_header_key('X-Account-Group-' + k)
        v = smart_str(','.join(v), strings_only=True)
        response[k] = v
    for k, v in policy.iteritems():
        response[smart_str(format_header_key('X-Account-Policy-' + k),
                           strings_only=True)] = smart_str(v, strings_only=True)


def get_container_headers(request):
    meta = get_header_prefix(request, 'X-Container-Meta-')
    check_meta_headers(meta)
    policy = dict([(k[19:].lower(), v.replace(' ', '')) for k, v in
                   get_header_prefix(request,
                                     'X-Container-Policy-').iteritems()])
    return meta, policy


def put_container_headers(request, response, meta, policy):
    if 'count' in meta:
        response['X-Container-Object-Count'] = meta['count']
    if 'bytes' in meta:
        response['X-Container-Bytes-Used'] = meta['bytes']
    response['Last-Modified'] = http_date(int(meta['modified']))
    for k in [x for x in meta.keys() if x.startswith('X-Container-Meta-')]:
        response[smart_str(
            k, strings_only=True)] = smart_str(meta[k], strings_only=True)
    l = [smart_str(x, strings_only=True) for x in meta['object_meta']
         if x.startswith('X-Object-Meta-')]
    response['X-Container-Object-Meta'] = ','.join([x[14:] for x in l])
    response['X-Container-Block-Size'] = request.backend.block_size
    response['X-Container-Block-Hash'] = request.backend.hash_algorithm
    if 'until_timestamp' in meta:
        response['X-Container-Until-Timestamp'] = http_date(
            int(meta['until_timestamp']))
    for k, v in policy.iteritems():
        response[smart_str(format_header_key('X-Container-Policy-' + k),
                           strings_only=True)] = smart_str(v,
                                                           strings_only=True)


def get_object_headers(request):
    content_type = request.META.get('CONTENT_TYPE', None)
    meta = get_header_prefix(request, 'X-Object-Meta-')
    check_meta_headers(meta)
    if request.META.get('HTTP_CONTENT_ENCODING'):
        meta['Content-Encoding'] = request.META['HTTP_CONTENT_ENCODING']
    if request.META.get('HTTP_CONTENT_DISPOSITION'):
        meta['Content-Disposition'] = request.META['HTTP_CONTENT_DISPOSITION']
    if request.META.get('HTTP_X_OBJECT_MANIFEST'):
        meta['X-Object-Manifest'] = request.META['HTTP_X_OBJECT_MANIFEST']
    return content_type, meta, get_sharing(request), get_public(request)


def put_object_headers(response, meta, restricted=False, token=None):
    response['ETag'] = meta['hash'] if not UPDATE_MD5 else meta['checksum']
    response['Content-Length'] = meta['bytes']
    response.override_serialization = True
    response['Content-Type'] = meta.get('type', 'application/octet-stream')
    response['Last-Modified'] = http_date(int(meta['modified']))
    if not restricted:
        response['X-Object-Hash'] = meta['hash']
        response['X-Object-UUID'] = meta['uuid']
        if TRANSLATE_UUIDS:
            meta['modified_by'] = \
                retrieve_displayname(token, meta['modified_by'])
        response['X-Object-Modified-By'] = smart_str(
            meta['modified_by'], strings_only=True)
        response['X-Object-Version'] = meta['version']
        response['X-Object-Version-Timestamp'] = http_date(
            int(meta['version_timestamp']))
        for k in [x for x in meta.keys() if x.startswith('X-Object-Meta-')]:
            response[smart_str(
                k, strings_only=True)] = smart_str(meta[k], strings_only=True)
        for k in (
                'Content-Encoding', 'Content-Disposition', 'X-Object-Manifest',
                'X-Object-Sharing', 'X-Object-Shared-By', 'X-Object-Allowed-To',
                'X-Object-Public'):
            if k in meta:
                response[k] = smart_str(meta[k], strings_only=True)
    else:
        for k in ('Content-Encoding', 'Content-Disposition'):
            if k in meta:
                response[k] = smart_str(meta[k], strings_only=True)


def update_manifest_meta(request, v_account, meta):
    """Update metadata if the object has an X-Object-Manifest."""

    if 'X-Object-Manifest' in meta:
        etag = ''
        bytes = 0
        try:
            src_container, src_name = split_container_object_string(
                '/' + meta['X-Object-Manifest'])
            objects = request.backend.list_objects(
                request.user_uniq, v_account,
                src_container, prefix=src_name, virtual=False)
            for x in objects:
                src_meta = request.backend.get_object_meta(
                    request.user_uniq, v_account, src_container, x[0],
                    'pithos', x[1])
                etag += (src_meta['hash'] if not UPDATE_MD5 else
                         src_meta['checksum'])
                bytes += src_meta['bytes']
        except:
            # Ignore errors.
            return
        meta['bytes'] = bytes
        md5 = hashlib.md5()
        md5.update(etag)
        meta['checksum'] = md5.hexdigest().lower()


def is_uuid(s):
    if s is None:
        return False
    try:
        uuid.UUID(s)
    except ValueError:
        return False
    else:
        return True
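
# For example:
#   is_uuid('12345678-1234-5678-1234-567812345678') -> True
#   is_uuid('alice') -> False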


##########################
# USER CATALOG utilities #
##########################

def retrieve_displayname(token, uuid, fail_silently=True):
    astakos = AstakosClient(ASTAKOS_BASE_URL, retry=2, use_pool=True,
                            logger=logger)
    try:
        displayname = astakos.get_username(token, uuid)
    except NoUserName:
        if not fail_silently:
            raise ItemNotExists(uuid)
        else:
            # just return the uuid
            return uuid
    return displayname


def retrieve_displaynames(token, uuids, return_dict=False, fail_silently=True):
    astakos = AstakosClient(ASTAKOS_BASE_URL, retry=2, use_pool=True,
                            logger=logger)
    catalog = astakos.get_usernames(token, uuids) or {}
    missing = list(set(uuids) - set(catalog))
    if missing and not fail_silently:
        raise ItemNotExists('Unknown uuids: %s' % ', '.join(missing))
    return catalog if return_dict else [catalog.get(i) for i in uuids]


def retrieve_uuid(token, displayname):
    if is_uuid(displayname):
        return displayname

    astakos = AstakosClient(ASTAKOS_BASE_URL, retry=2, use_pool=True,
                            logger=logger)
    try:
        uuid = astakos.get_uuid(token, displayname)
    except NoUUID:
        raise ItemNotExists(displayname)
    return uuid


def retrieve_uuids(token, displaynames, return_dict=False, fail_silently=True):
    astakos = AstakosClient(ASTAKOS_BASE_URL, retry=2, use_pool=True,
                            logger=logger)
    catalog = astakos.get_uuids(token, displaynames) or {}
    missing = list(set(displaynames) - set(catalog))
    if missing and not fail_silently:
        raise ItemNotExists('Unknown displaynames: %s' % ', '.join(missing))
    return catalog if return_dict else [catalog.get(i) for i in displaynames]


def replace_permissions_displayname(token, holder):
    if holder == '*':
        return holder
    try:
        # check first for a group permission
        account, group = holder.split(':', 1)
    except ValueError:
        return retrieve_uuid(token, holder)
    else:
        return ':'.join([retrieve_uuid(token, account), group])


def replace_permissions_uuid(token, holder):
    if holder == '*':
        return holder
    try:
        # check first for a group permission
        account, group = holder.split(':', 1)
    except ValueError:
        return retrieve_displayname(token, holder)
    else:
        return ':'.join([retrieve_displayname(token, account), group])


def update_sharing_meta(request, permissions, v_account,
                        v_container, v_object, meta):
    if permissions is None:
        return
    allowed, perm_path, perms = permissions
    if len(perms) == 0:
        return

    # replace uuid with displayname
    if TRANSLATE_UUIDS:
        perms['read'] = [replace_permissions_uuid(
            getattr(request, 'token', None), x)
            for x in perms.get('read', [])]
        perms['write'] = [replace_permissions_uuid(
            getattr(request, 'token', None), x)
            for x in perms.get('write', [])]

    ret = []

    r = ','.join(perms.get('read', []))
    if r:
        ret.append('read=' + r)
    w = ','.join(perms.get('write', []))
    if w:
        ret.append('write=' + w)
    meta['X-Object-Sharing'] = '; '.join(ret)
    if '/'.join((v_account, v_container, v_object)) != perm_path:
        meta['X-Object-Shared-By'] = perm_path
    if request.user_uniq != v_account:
        meta['X-Object-Allowed-To'] = allowed


def update_public_meta(public, meta):
    if not public:
        return
    meta['X-Object-Public'] = join_urls(
        BASE_HOST, reverse('pithos.api.public.public_demux', args=(public,)))


def validate_modification_preconditions(request, meta):
    """Check that the modified timestamp conforms with the preconditions."""

    if 'modified' not in meta:
        return  # TODO: Always return?

    if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE')
    if if_modified_since is not None:
        if_modified_since = parse_http_date_safe(if_modified_since)
    if (if_modified_since is not None
            and int(meta['modified']) <= if_modified_since):
        raise faults.NotModified('Resource has not been modified')

    if_unmodified_since = request.META.get('HTTP_IF_UNMODIFIED_SINCE')
    if if_unmodified_since is not None:
        if_unmodified_since = parse_http_date_safe(if_unmodified_since)
    if (if_unmodified_since is not None
            and int(meta['modified']) > if_unmodified_since):
        raise faults.PreconditionFailed('Resource has been modified')


def validate_matching_preconditions(request, meta):
    """Check that the ETag conforms with the preconditions set."""

    etag = meta['hash'] if not UPDATE_MD5 else meta['checksum']
    if not etag:
        etag = None

    if_match = request.META.get('HTTP_IF_MATCH')
    if if_match is not None:
        if etag is None:
            raise faults.PreconditionFailed('Resource does not exist')
        if (if_match != '*'
                and etag not in [x.lower() for x in parse_etags(if_match)]):
            raise faults.PreconditionFailed('Resource ETag does not match')

    if_none_match = request.META.get('HTTP_IF_NONE_MATCH')
    if if_none_match is not None:
        # TODO: If this passes, must ignore If-Modified-Since header.
        if etag is not None:
            if (if_none_match == '*' or etag in [x.lower() for x in
                                                 parse_etags(if_none_match)]):
                # TODO: Continue if an If-Modified-Since header is present.
                if request.method in ('HEAD', 'GET'):
                    raise faults.NotModified('Resource ETag matches')
                raise faults.PreconditionFailed(
                    'Resource exists or ETag matches')


def split_container_object_string(s):
    if not len(s) > 0 or s[0] != '/':
        raise ValueError
    s = s[1:]
    pos = s.find('/')
    if pos == -1 or pos == len(s) - 1:
        raise ValueError
    return s[:pos], s[(pos + 1):]
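
# For example (illustrative names):
#   split_container_object_string('/photos/summer/beach.jpg')
#   -> ('photos', 'summer/beach.jpg')
# Strings without both a container and an object part raise ValueError.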


def copy_or_move_object(request, src_account, src_container, src_name,
                        dest_account, dest_container, dest_name,
                        move=False, delimiter=None):
    """Copy or move an object."""

    if 'ignore_content_type' in request.GET and 'CONTENT_TYPE' in request.META:
        del(request.META['CONTENT_TYPE'])
    content_type, meta, permissions, public = get_object_headers(request)
    src_version = request.META.get('HTTP_X_SOURCE_VERSION')
    try:
        if move:
            version_id = request.backend.move_object(
                request.user_uniq, src_account, src_container, src_name,
                dest_account, dest_container, dest_name,
                content_type, 'pithos', meta, False, permissions, delimiter)
        else:
            version_id = request.backend.copy_object(
                request.user_uniq, src_account, src_container, src_name,
                dest_account, dest_container, dest_name,
                content_type, 'pithos', meta, False, permissions,
                src_version, delimiter)
    except NotAllowedError:
        raise faults.Forbidden('Not allowed')
    except (ItemNotExists, VersionNotExists):
        raise faults.ItemNotFound('Container or object does not exist')
    except ValueError:
        raise faults.BadRequest('Invalid sharing header')
    except QuotaError, e:
        raise faults.RequestEntityTooLarge('Quota error: %s' % e)
    if public is not None:
        try:
            request.backend.update_object_public(
                request.user_uniq, dest_account,
                dest_container, dest_name, public)
        except NotAllowedError:
            raise faults.Forbidden('Not allowed')
        except ItemNotExists:
            raise faults.ItemNotFound('Object does not exist')
    return version_id


def get_int_parameter(p):
    if p is not None:
        try:
            p = int(p)
        except ValueError:
            return None
        if p < 0:
            return None
    return p


def get_content_length(request):
    content_length = get_int_parameter(request.META.get('CONTENT_LENGTH'))
    if content_length is None:
        raise faults.LengthRequired('Missing or invalid Content-Length header')
    return content_length


def get_range(request, size):
    """Parse a Range header from the request.

    Returns None when the header is absent, malformed or should be ignored;
    otherwise a list of (offset, length) tuples, which should be further
    checked against the object size.
    """

    ranges = request.META.get('HTTP_RANGE', '').replace(' ', '')
    if not ranges.startswith('bytes='):
        return None

    ret = []
    for r in (x.strip() for x in ranges[6:].split(',')):
        p = re.compile('^(?P<offset>\d*)-(?P<upto>\d*)$')
        m = p.match(r)
        if not m:
            return None
        offset = m.group('offset')
        upto = m.group('upto')
        if offset == '' and upto == '':
            return None

        if offset != '':
            offset = int(offset)
            if upto != '':
                upto = int(upto)
                if offset > upto:
                    return None
                ret.append((offset, upto - offset + 1))
            else:
                ret.append((offset, size - offset))
        else:
            length = int(upto)
            ret.append((size - length, length))

    return ret
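
# Illustrative parses for a 1000-byte object (header values hypothetical):
#   'bytes=0-499'   -> [(0, 500)]    # explicit range
#   'bytes=500-'    -> [(500, 500)]  # open-ended: offset to end of object
#   'bytes=-200'    -> [(800, 200)]  # suffix: the last 200 bytes
#   'bytes=700-600' -> None          # invalid: offset greater than upto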


def get_content_range(request):
    """Parse a Content-Range header from the request.

    Returns None when the header is absent, malformed or should be ignored;
    otherwise an (offset, length, total) tuple, where length and total may
    each be None. Returns (None, None, None) if the provided range is
    'bytes */*'.
    """

    ranges = request.META.get('HTTP_CONTENT_RANGE', '')
    if not ranges:
        return None

    p = re.compile('^bytes (?P<offset>\d+)-(?P<upto>\d*)/(?P<total>(\d+|\*))$')
    m = p.match(ranges)
    if not m:
        if ranges == 'bytes */*':
            return (None, None, None)
        return None
    offset = int(m.group('offset'))
    upto = m.group('upto')
    total = m.group('total')
    if upto != '':
        upto = int(upto)
    else:
        upto = None
    if total != '*':
        total = int(total)
    else:
        total = None
    if (upto is not None and offset > upto) or \
        (total is not None and offset >= total) or \
            (total is not None and upto is not None and upto >= total):
        return None

    if upto is None:
        length = None
    else:
        length = upto - offset + 1
    return (offset, length, total)
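
# Illustrative parses (header values hypothetical):
#   'bytes 0-499/1000' -> (0, 500, 1000)
#   'bytes 500-/1000'  -> (500, None, 1000)   # unknown end offset
#   'bytes 0-499/*'    -> (0, 500, None)      # unknown total size
#   'bytes */*'        -> (None, None, None)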


def get_sharing(request):
    """Parse an X-Object-Sharing header from the request.

    Raises BadRequest on error.
    """

    permissions = request.META.get('HTTP_X_OBJECT_SHARING')
    if permissions is None:
        return None

    # TODO: Document or remove '~' replacing.
    permissions = permissions.replace('~', '')

    ret = {}
    permissions = permissions.replace(' ', '')
    if permissions == '':
        return ret
    for perm in (x for x in permissions.split(';')):
        if perm.startswith('read='):
            ret['read'] = list(set(
                [v.replace(' ', '').lower() for v in perm[5:].split(',')]))
            if '' in ret['read']:
                ret['read'].remove('')
            if '*' in ret['read']:
                ret['read'] = ['*']
            if len(ret['read']) == 0:
                raise faults.BadRequest(
                    'Bad X-Object-Sharing header value: invalid length')
        elif perm.startswith('write='):
            ret['write'] = list(set(
                [v.replace(' ', '').lower() for v in perm[6:].split(',')]))
            if '' in ret['write']:
                ret['write'].remove('')
            if '*' in ret['write']:
                ret['write'] = ['*']
            if len(ret['write']) == 0:
                raise faults.BadRequest(
                    'Bad X-Object-Sharing header value: invalid length')
        else:
            raise faults.BadRequest(
                'Bad X-Object-Sharing header value: missing prefix')

    # replace displayname with uuid
    if TRANSLATE_UUIDS:
        try:
            ret['read'] = [replace_permissions_displayname(
                getattr(request, 'token', None), x)
                for x in ret.get('read', [])]
            ret['write'] = [replace_permissions_displayname(
                getattr(request, 'token', None), x)
                for x in ret.get('write', [])]
        except ItemNotExists, e:
            raise faults.BadRequest(
                'Bad X-Object-Sharing header value: unknown account: %s' % e)

    # Keep duplicates only in write list.
    dups = [x for x in ret.get(
        'read', []) if x in ret.get('write', []) and x != '*']
    if dups:
        for x in dups:
            ret['read'].remove(x)
        if len(ret['read']) == 0:
            del(ret['read'])

    return ret
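
# Illustrative parse (account names hypothetical, TRANSLATE_UUIDS disabled):
#   'read=alice,bob; write=bob' -> {'read': ['alice'], 'write': ['bob']}
# ('bob' ends up only under write, per the duplicate handling above.)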


def get_public(request):
    """Parse an X-Object-Public header from the request.

    Raises BadRequest on error.
    """

    public = request.META.get('HTTP_X_OBJECT_PUBLIC')
    if public is None:
        return None

    public = public.replace(' ', '').lower()
    if public == 'true':
        return True
    elif public == 'false' or public == '':
        return False
    raise faults.BadRequest('Bad X-Object-Public header value')


def raw_input_socket(request):
    """Return the socket for reading the rest of the request."""

    server_software = request.META.get('SERVER_SOFTWARE')
    if server_software and server_software.startswith('mod_python'):
        return request._req
    if 'wsgi.input' in request.environ:
        return request.environ['wsgi.input']
    raise NotImplementedError('Unknown server software')

MAX_UPLOAD_SIZE = 5 * (1024 * 1024 * 1024)  # 5GB


def socket_read_iterator(request, length=0, blocksize=4096):
    """Return at most 'blocksize' bytes read from the socket per iteration.

    Read up to 'length'. If 'length' is negative, will attempt a chunked read.
    The maximum amount of data read is controlled by MAX_UPLOAD_SIZE.
    """

    sock = raw_input_socket(request)
    if length < 0:  # Chunked transfers
        # Small version (server does the dechunking).
        if (request.environ.get('mod_wsgi.input_chunked', None)
                or request.META['SERVER_SOFTWARE'].startswith('gunicorn')):
            while length < MAX_UPLOAD_SIZE:
                data = sock.read(blocksize)
                if data == '':
                    return
                yield data
            raise faults.BadRequest('Maximum size is reached')

        # Long version (do the dechunking).
        data = ''
        while length < MAX_UPLOAD_SIZE:
            # Get chunk size.
            if hasattr(sock, 'readline'):
                chunk_length = sock.readline()
            else:
                chunk_length = ''
                while chunk_length[-1:] != '\n':
                    chunk_length += sock.read(1)
                chunk_length = chunk_length.strip()
            pos = chunk_length.find(';')
            if pos >= 0:
                chunk_length = chunk_length[:pos]
            try:
                chunk_length = int(chunk_length, 16)
            except Exception:
                raise faults.BadRequest('Bad chunk size')
                # TODO: Change to something more appropriate.
            # Check if done.
            if chunk_length == 0:
                if len(data) > 0:
                    yield data
                return
            # Get the actual data.
            while chunk_length > 0:
                chunk = sock.read(min(chunk_length, blocksize))
                chunk_length -= len(chunk)
                if length > 0:
                    length += len(chunk)
                data += chunk
                if len(data) >= blocksize:
                    ret = data[:blocksize]
                    data = data[blocksize:]
                    yield ret
            sock.read(2)  # CRLF
        raise faults.BadRequest('Maximum size is reached')
    else:
        if length > MAX_UPLOAD_SIZE:
            raise faults.BadRequest('Maximum size is reached')
        while length > 0:
            data = sock.read(min(length, blocksize))
            if not data:
                raise faults.BadRequest()
            length -= len(data)
            yield data
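
# Reference for the dechunking branch above: an HTTP/1.1 chunked body is a
# series of "<hex-size>[;extension]CRLF<data>CRLF" records, terminated by a
# zero-size chunk. For example (illustrative), the wire bytes
#   '4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n'
# decode to 'Wikipedia'.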


class SaveToBackendHandler(FileUploadHandler):
    """Handle a file from an HTML form the Django way."""

    def __init__(self, request=None):
        super(SaveToBackendHandler, self).__init__(request)
        self.backend = request.backend

    def put_data(self, length):
        if len(self.data) >= length:
            block = self.data[:length]
            self.file.hashmap.append(self.backend.put_block(block))
            self.checksum_compute.update(block)
            self.data = self.data[length:]

    def new_file(self, field_name, file_name, content_type,
                 content_length, charset=None):
        self.checksum_compute = NoChecksum() if not UPDATE_MD5 else Checksum()
        self.data = ''
        self.file = UploadedFile(
            name=file_name, content_type=content_type, charset=charset)
        self.file.size = 0
        self.file.hashmap = []

    def receive_data_chunk(self, raw_data, start):
        self.data += raw_data
        self.file.size += len(raw_data)
        self.put_data(self.request.backend.block_size)
        return None

    def file_complete(self, file_size):
        l = len(self.data)
        if l > 0:
            self.put_data(l)
        self.file.etag = self.checksum_compute.hexdigest()
        return self.file


class ObjectWrapper(object):
    """Return the object's data block-per-block in each iteration.

    Read from the object using the offset and length provided
    in each entry of the range list.
    """

    def __init__(self, backend, ranges, sizes, hashmaps, boundary):
        self.backend = backend
        self.ranges = ranges
        self.sizes = sizes
        self.hashmaps = hashmaps
        self.boundary = boundary
        self.size = sum(self.sizes)

        self.file_index = 0
        self.block_index = 0
        self.block_hash = -1
        self.block = ''

        self.range_index = -1
        self.offset, self.length = self.ranges[0]

    def __iter__(self):
        return self

    def part_iterator(self):
        if self.length > 0:
            # Get the file for the current offset.
            file_size = self.sizes[self.file_index]
            while self.offset >= file_size:
                self.offset -= file_size
                self.file_index += 1
                file_size = self.sizes[self.file_index]

            # Get the block for the current position.
            self.block_index = int(self.offset / self.backend.block_size)
            if self.block_hash != \
                    self.hashmaps[self.file_index][self.block_index]:
                self.block_hash = self.hashmaps[
                    self.file_index][self.block_index]
                try:
                    self.block = self.backend.get_block(self.block_hash)
                except ItemNotExists:
                    raise faults.ItemNotFound('Block does not exist')

            # Get the data from the block.
            bo = self.offset % self.backend.block_size
            bs = self.backend.block_size
            if (self.block_index == len(self.hashmaps[self.file_index]) - 1 and
                    self.sizes[self.file_index] % self.backend.block_size):
                bs = self.sizes[self.file_index] % self.backend.block_size
            bl = min(self.length, bs - bo)
            data = self.block[bo:bo + bl]
            self.offset += bl
            self.length -= bl
            return data
        else:
            raise StopIteration

    def next(self):
        if len(self.ranges) == 1:
            return self.part_iterator()
        if self.range_index == len(self.ranges):
            raise StopIteration
        try:
            if self.range_index == -1:
                raise StopIteration
            return self.part_iterator()
        except StopIteration:
            self.range_index += 1
            out = []
            if self.range_index < len(self.ranges):
                # Part header.
                self.offset, self.length = self.ranges[self.range_index]
                self.file_index = 0
                if self.range_index > 0:
                    out.append('')
                out.append('--' + self.boundary)
                out.append('Content-Range: bytes %d-%d/%d' % (
                    self.offset, self.offset + self.length - 1, self.size))
                out.append('Content-Transfer-Encoding: binary')
                out.append('')
                out.append('')
                return '\r\n'.join(out)
            else:
                # Footer.
                out.append('')
                out.append('--' + self.boundary + '--')
                out.append('')
                return '\r\n'.join(out)


def object_data_response(request, sizes, hashmaps, meta, public=False):
    """Get the HttpResponse object for replying with the object's data."""

    # Range handling.
    size = sum(sizes)
    ranges = get_range(request, size)
    if ranges is None:
        ranges = [(0, size)]
        ret = 200
    else:
        check = [True for offset, length in ranges if
                 length <= 0 or length > size or
                 offset < 0 or offset >= size or
                 offset + length > size]
        if len(check) > 0:
            raise faults.RangeNotSatisfiable(
                'Requested range exceeds object limits')
        ret = 206
        if_range = request.META.get('HTTP_IF_RANGE')
        if if_range:
            try:
                # If-Range holds an HTTP date; compare modification times.
                last_modified = parse_http_date(if_range)
                if last_modified != meta['modified']:
                    ranges = [(0, size)]
                    ret = 200
            except ValueError:
                # Otherwise treat If-Range as an ETag.
                if if_range != meta['checksum']:
                    ranges = [(0, size)]
                    ret = 200

    if ret == 206 and len(ranges) > 1:
        boundary = uuid.uuid4().hex
    else:
        boundary = ''
    wrapper = ObjectWrapper(request.backend, ranges, sizes, hashmaps, boundary)
    response = HttpResponse(wrapper, status=ret)
    put_object_headers(
        response, meta, restricted=public,
        token=getattr(request, 'token', None))
    if ret == 206:
        if len(ranges) == 1:
            offset, length = ranges[0]
            response[
                'Content-Length'] = length  # Update with the correct length.
            response['Content-Range'] = 'bytes %d-%d/%d' % (
                offset, offset + length - 1, size)
        else:
            del(response['Content-Length'])
            response['Content-Type'] = 'multipart/byteranges; boundary=%s' % (
                boundary,)
    return response


def put_object_block(request, hashmap, data, offset):
    """Put one block of data at the given offset."""

    bi = int(offset / request.backend.block_size)
    bo = offset % request.backend.block_size
    bl = min(len(data), request.backend.block_size - bo)
    if bi < len(hashmap):
        hashmap[bi] = request.backend.update_block(hashmap[bi], data[:bl], bo)
    else:
        hashmap.append(request.backend.put_block(('\x00' * bo) + data[:bl]))
    return bl  # Return amount of data written.
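
# Worked example (assuming a 4 MiB block size): offset 5242880 (5 MiB)
# gives bi == 1 and bo == 1048576 (1 MiB), so this call writes at most
# 3 MiB of 'data' into the second block.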


def hashmap_md5(backend, hashmap, size):
    """Produce the MD5 sum from the data in the hashmap."""

    # TODO: Search backend for the MD5 of another object
    #       with the same hashmap and size...
    md5 = hashlib.md5()
    bs = backend.block_size
    for bi, hash in enumerate(hashmap):
        data = backend.get_block(hash)  # Blocks come in padded.
        if bi == len(hashmap) - 1:
            data = data[:size % bs]
        md5.update(data)
    return md5.hexdigest().lower()


def simple_list_response(request, l):
    if request.serialization == 'text':
        return '\n'.join(l) + '\n'
    if request.serialization == 'xml':
        return render_to_string('items.xml', {'items': l})
    if request.serialization == 'json':
        return json.dumps(l)


from pithos.backends.util import PithosBackendPool

if RADOS_STORAGE:
    BLOCK_PARAMS = {'mappool': RADOS_POOL_MAPS,
                    'blockpool': RADOS_POOL_BLOCKS, }
else:
    BLOCK_PARAMS = {'mappool': None,
                    'blockpool': None, }

BACKEND_KWARGS = dict(
    db_module=BACKEND_DB_MODULE,
    db_connection=BACKEND_DB_CONNECTION,
    block_module=BACKEND_BLOCK_MODULE,
    block_path=BACKEND_BLOCK_PATH,
    block_umask=BACKEND_BLOCK_UMASK,
    block_size=BACKEND_BLOCK_SIZE,
    hash_algorithm=BACKEND_HASH_ALGORITHM,
    queue_module=BACKEND_QUEUE_MODULE,
    queue_hosts=BACKEND_QUEUE_HOSTS,
    queue_exchange=BACKEND_QUEUE_EXCHANGE,
    astakos_url=ASTAKOS_BASE_URL,
    service_token=SERVICE_TOKEN,
    astakosclient_poolsize=ASTAKOSCLIENT_POOLSIZE,
    free_versioning=BACKEND_FREE_VERSIONING,
    block_params=BLOCK_PARAMS,
    public_url_security=PUBLIC_URL_SECURITY,
    public_url_alphabet=PUBLIC_URL_ALPHABET,
    account_quota_policy=BACKEND_ACCOUNT_QUOTA,
    container_quota_policy=BACKEND_CONTAINER_QUOTA,
    container_versioning_policy=BACKEND_VERSIONING)

_pithos_backend_pool = PithosBackendPool(size=BACKEND_POOL_SIZE,
                                         **BACKEND_KWARGS)


def get_backend():
    if BACKEND_POOL_ENABLED:
        backend = _pithos_backend_pool.pool_get()
    else:
        backend = connect_backend(**BACKEND_KWARGS)
    backend.serials = []
    backend.messages = []
    return backend
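
# Minimal usage sketch (error handling elided); api_method below applies
# the same acquire/close discipline:
#   backend = get_backend()
#   try:
#       pass  # ... use backend ...
#   finally:
#       backend.close()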


def update_request_headers(request):
    # Handle URL-encoded keys and values.
    meta = dict([(
        k, v) for k, v in request.META.iteritems() if k.startswith('HTTP_')])
    for k, v in meta.iteritems():
        try:
            k.decode('ascii')
            v.decode('ascii')
        except UnicodeDecodeError:
            raise faults.BadRequest('Bad character in headers.')
        if '%' in k or '%' in v:
            del(request.META[k])
            request.META[unquote(k)] = smart_unicode(unquote(
                v), strings_only=True)


def update_response_headers(request, response):
    # URL-encode unicode in headers.
    meta = response.items()
    for k, v in meta:
        if (k.startswith('X-Account-') or k.startswith('X-Container-') or
                k.startswith('X-Object-') or k.startswith('Content-')):
            del(response[k])
            response[quote(k)] = quote(v, safe='/=,:@; ')


def get_pithos_usage(token):
    """Get Pithos usage from Astakos."""
    astakos = AstakosClient(ASTAKOS_BASE_URL, retry=2, use_pool=True,
                            logger=logger)
    quotas = astakos.get_quotas(token)['system']
    pithos_resources = [r['name'] for r in resources]
    map(quotas.pop, filter(lambda k: k not in pithos_resources, quotas.keys()))
    return quotas.popitem()[-1]  # assume only one resource


def api_method(http_method=None, token_required=True, user_required=True,
               logger=None, format_allowed=False, serializations=None,
               strict_serlization=False, lock_container_path=False):
    serializations = serializations or ['json', 'xml']

    def decorator(func):
        @api.api_method(http_method=http_method, token_required=token_required,
                        user_required=user_required,
                        logger=logger, format_allowed=format_allowed,
                        astakos_url=ASTAKOS_BASE_URL,
                        serializations=serializations,
                        strict_serlization=strict_serlization)
        @wraps(func)
        def wrapper(request, *args, **kwargs):
            # The args variable may contain up to (account, container, object).
            if len(args) > 1 and len(args[1]) > 256:
                raise faults.BadRequest('Container name too large.')
            if len(args) > 2 and len(args[2]) > 1024:
                raise faults.BadRequest('Object name too large.')

            success_status = False
            try:
                # Attach a PithosBackend as an attribute of the request object
                request.backend = get_backend()
                request.backend.pre_exec(lock_container_path)

                # Many API methods expect the X-Auth-Token in request.token
                request.token = request.x_auth_token
                update_request_headers(request)
                response = func(request, *args, **kwargs)
                update_response_headers(request, response)

                success_status = True
                return response
            finally:
                # Always close the PithosBackend connection
                if getattr(request, "backend", None) is not None:
                    request.backend.post_exec(success_status)
                    request.backend.close()
        return wrapper
    return decorator
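
# Hedged usage sketch (view name and arguments are hypothetical); API views
# are expected to be wrapped like this:
#   @api_method('GET', format_allowed=True)
#   def account_meta(request, v_account):
#       ...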


def get_token_from_cookie(request):
    assert(request.method == 'GET'), \
        "Cookie-based authentication is only allowed for GET requests"
    token = None
    if COOKIE_NAME in request.COOKIES:
        cookie_value = unquote(request.COOKIES.get(COOKIE_NAME, ''))
        account, sep, token = cookie_value.partition('|')
    return token


def view_method():
    """Decorator function for views."""

    def decorator(func):
        @wraps(func)
        def wrapper(request, *args, **kwargs):
            token = get_token_from_cookie(request)
            if token is None:
                return HttpResponseRedirect('%s?next=%s' % (LOGIN_URL,
                                                            request.path))
            request.META['HTTP_X_AUTH_TOKEN'] = token
            # Get the response object
            response = func(request, *args, **kwargs)
            if response.status_code in [200, 206, 304, 412, 416]:
                return response
            elif response.status_code == 404:
                raise Http404()
            elif response.status_code in [401, 403]:
                return HttpResponseForbidden()
            else:
                # unexpected response status
                raise Exception(response.status_code)
        return wrapper
    return decorator


class Checksum:
    """Incrementally compute an MD5 checksum (used when UPDATE_MD5 is set)."""

    def __init__(self):
        self.md5 = hashlib.md5()

    def update(self, data):
        self.md5.update(data)

    def hexdigest(self):
        return self.md5.hexdigest().lower()


class NoChecksum:
    """No-op drop-in for Checksum, for when MD5 updating is disabled."""

    def update(self, data):
        pass

    def hexdigest(self):
        return ''