# pithos/backends/modular.py @ 15a96c3e
# Copyright 2011-2012 GRNET S.A. All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
#   1. Redistributions of source code must retain the above
#      copyright notice, this list of conditions and the following
#      disclaimer.
#
#   2. Redistributions in binary form must reproduce the above
#      copyright notice, this list of conditions and the following
#      disclaimer in the documentation and/or other materials
#      provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
# or implied, of GRNET S.A.

import sys
import os
import time
import uuid as uuidlib
import logging
import binascii

from base import DEFAULT_QUOTA, DEFAULT_VERSIONING, NotAllowedError, QuotaError, BaseBackend

from pithos.lib.hashmap import HashMap

# Default modules and settings.
DEFAULT_DB_MODULE = 'pithos.backends.lib.sqlalchemy'
DEFAULT_DB_CONNECTION = 'sqlite:///backend.db'
DEFAULT_BLOCK_MODULE = 'pithos.backends.lib.hashfiler'
DEFAULT_BLOCK_PATH = 'data/'
#DEFAULT_QUEUE_MODULE = 'pithos.backends.lib.rabbitmq'
#DEFAULT_QUEUE_CONNECTION = 'rabbitmq://guest:guest@localhost:5672/pithos'

QUEUE_MESSAGE_KEY = '#'
QUEUE_CLIENT_ID = 2 # Pithos.

( CLUSTER_NORMAL, CLUSTER_HISTORY, CLUSTER_DELETED ) = range(3)

inf = float('inf')

ULTIMATE_ANSWER = 42


logger = logging.getLogger(__name__)


def backend_method(func=None, autocommit=1):
    if func is None:
        def fn(func):
            return backend_method(func, autocommit)
        return fn

    if not autocommit:
        return func
    def fn(self, *args, **kw):
        self.wrapper.execute()
        try:
            ret = func(self, *args, **kw)
            self.wrapper.commit()
            return ret
        except:
            self.wrapper.rollback()
            raise
    return fn


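# A note on the decorator above (descriptive only): with the default
# autocommit=1, backend_method wraps each public backend call in a database
# transaction (wrapper.execute() before the call, wrapper.commit() on success,
# wrapper.rollback() and re-raise on any exception). With
# @backend_method(autocommit=0) the function is returned unwrapped, as in the
# block I/O methods further below that do not touch the database.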
class ModularBackend(BaseBackend):
    """A modular backend.

    Uses modules for SQL functions and storage.
    """

    def __init__(self, db_module=None, db_connection=None,
                 block_module=None, block_path=None,
                 queue_module=None, queue_connection=None):
        db_module = db_module or DEFAULT_DB_MODULE
        db_connection = db_connection or DEFAULT_DB_CONNECTION
        block_module = block_module or DEFAULT_BLOCK_MODULE
        block_path = block_path or DEFAULT_BLOCK_PATH
        #queue_module = queue_module or DEFAULT_QUEUE_MODULE
        #queue_connection = queue_connection or DEFAULT_QUEUE_CONNECTION

        self.hash_algorithm = 'sha256'
        self.block_size = 4 * 1024 * 1024 # 4MB

        self.default_policy = {'quota': DEFAULT_QUOTA, 'versioning': DEFAULT_VERSIONING}

        def load_module(m):
            __import__(m)
            return sys.modules[m]

        self.db_module = load_module(db_module)
        self.wrapper = self.db_module.DBWrapper(db_connection)
        params = {'wrapper': self.wrapper}
        self.permissions = self.db_module.Permissions(**params)
        for x in ['READ', 'WRITE']:
            setattr(self, x, getattr(self.db_module, x))
        self.node = self.db_module.Node(**params)
        for x in ['ROOTNODE', 'SERIAL', 'HASH', 'SIZE', 'TYPE', 'MTIME', 'MUSER', 'UUID', 'CHECKSUM', 'CLUSTER', 'MATCH_PREFIX', 'MATCH_EXACT']:
            setattr(self, x, getattr(self.db_module, x))

        self.block_module = load_module(block_module)
        params = {'path': block_path,
                  'block_size': self.block_size,
                  'hash_algorithm': self.hash_algorithm}
        self.store = self.block_module.Store(**params)

        if queue_module and queue_connection:
            self.queue_module = load_module(queue_module)
            params = {'exchange': queue_connection,
                      'message_key': QUEUE_MESSAGE_KEY,
                      'client_id': QUEUE_CLIENT_ID}
            self.queue = self.queue_module.Queue(**params)
        else:
            class NoQueue:
                def send(self, *args):
                    pass

                def close(self):
                    pass

            self.queue = NoQueue()

    def close(self):
        self.wrapper.close()
        self.queue.close()

@backend_method
|
148 |
def list_accounts(self, user, marker=None, limit=10000): |
149 |
"""Return a list of accounts the user can access."""
|
150 |
|
151 |
logger.debug("list_accounts: %s %s %s", user, marker, limit)
|
152 |
allowed = self._allowed_accounts(user)
|
153 |
start, limit = self._list_limits(allowed, marker, limit)
|
154 |
return allowed[start:start + limit]
|
155 |
|
156 |
@backend_method
|
157 |
def get_account_meta(self, user, account, domain, until=None): |
158 |
"""Return a dictionary with the account metadata for the domain."""
|
159 |
|
160 |
logger.debug("get_account_meta: %s %s %s", account, domain, until)
|
161 |
path, node = self._lookup_account(account, user == account)
|
162 |
if user != account:
|
163 |
if until or node is None or account not in self._allowed_accounts(user): |
164 |
raise NotAllowedError
|
165 |
try:
|
166 |
props = self._get_properties(node, until)
|
167 |
mtime = props[self.MTIME]
|
168 |
except NameError: |
169 |
props = None
|
170 |
mtime = until |
171 |
count, bytes, tstamp = self._get_statistics(node, until) |
172 |
tstamp = max(tstamp, mtime)
|
173 |
if until is None: |
174 |
modified = tstamp |
175 |
else:
|
176 |
modified = self._get_statistics(node)[2] # Overall last modification. |
177 |
modified = max(modified, mtime)
|
178 |
|
179 |
if user != account:
|
180 |
meta = {'name': account}
|
181 |
else:
|
182 |
meta = {} |
183 |
if props is not None: |
184 |
meta.update(dict(self.node.attribute_get(props[self.SERIAL], domain))) |
185 |
if until is not None: |
186 |
meta.update({'until_timestamp': tstamp})
|
187 |
meta.update({'name': account, 'count': count, 'bytes': bytes}) |
188 |
meta.update({'modified': modified})
|
189 |
return meta
|
190 |
|
191 |
@backend_method
|
192 |
def update_account_meta(self, user, account, domain, meta, replace=False): |
193 |
"""Update the metadata associated with the account for the domain."""
|
194 |
|
195 |
logger.debug("update_account_meta: %s %s %s %s", account, domain, meta, replace)
|
196 |
if user != account:
|
197 |
raise NotAllowedError
|
198 |
path, node = self._lookup_account(account, True) |
199 |
self._put_metadata(user, node, domain, meta, replace)
|
200 |
|
201 |
@backend_method
|
202 |
def get_account_groups(self, user, account): |
203 |
"""Return a dictionary with the user groups defined for this account."""
|
204 |
|
205 |
logger.debug("get_account_groups: %s", account)
|
206 |
if user != account:
|
207 |
if account not in self._allowed_accounts(user): |
208 |
raise NotAllowedError
|
209 |
return {}
|
210 |
self._lookup_account(account, True) |
211 |
return self.permissions.group_dict(account) |
212 |
|
213 |
@backend_method
|
214 |
def update_account_groups(self, user, account, groups, replace=False): |
215 |
"""Update the groups associated with the account."""
|
216 |
|
217 |
logger.debug("update_account_groups: %s %s %s", account, groups, replace)
|
218 |
if user != account:
|
219 |
raise NotAllowedError
|
220 |
self._lookup_account(account, True) |
221 |
self._check_groups(groups)
|
222 |
if replace:
|
223 |
self.permissions.group_destroy(account)
|
224 |
for k, v in groups.iteritems(): |
225 |
if not replace: # If not already deleted. |
226 |
self.permissions.group_delete(account, k)
|
227 |
if v:
|
228 |
self.permissions.group_addmany(account, k, v)
|
229 |
|
230 |
@backend_method
|
231 |
def get_account_policy(self, user, account): |
232 |
"""Return a dictionary with the account policy."""
|
233 |
|
234 |
logger.debug("get_account_policy: %s", account)
|
235 |
if user != account:
|
236 |
if account not in self._allowed_accounts(user): |
237 |
raise NotAllowedError
|
238 |
return {}
|
239 |
path, node = self._lookup_account(account, True) |
240 |
return self._get_policy(node) |
241 |
|
242 |
@backend_method
|
243 |
def update_account_policy(self, user, account, policy, replace=False): |
244 |
"""Update the policy associated with the account."""
|
245 |
|
246 |
logger.debug("update_account_policy: %s %s %s", account, policy, replace)
|
247 |
if user != account:
|
248 |
raise NotAllowedError
|
249 |
path, node = self._lookup_account(account, True) |
250 |
self._check_policy(policy)
|
251 |
self._put_policy(node, policy, replace)
|
252 |
|
253 |
@backend_method
|
254 |
def put_account(self, user, account, policy={}): |
255 |
"""Create a new account with the given name."""
|
256 |
|
257 |
logger.debug("put_account: %s %s", account, policy)
|
258 |
if user != account:
|
259 |
raise NotAllowedError
|
260 |
node = self.node.node_lookup(account)
|
261 |
if node is not None: |
262 |
raise NameError('Account already exists') |
263 |
if policy:
|
264 |
self._check_policy(policy)
|
265 |
node = self._put_path(user, self.ROOTNODE, account) |
266 |
self._put_policy(node, policy, True) |
267 |
|
268 |
@backend_method
|
269 |
def delete_account(self, user, account): |
270 |
"""Delete the account with the given name."""
|
271 |
|
272 |
logger.debug("delete_account: %s", account)
|
273 |
if user != account:
|
274 |
raise NotAllowedError
|
275 |
node = self.node.node_lookup(account)
|
276 |
if node is None: |
277 |
return
|
278 |
if not self.node.node_remove(node): |
279 |
raise IndexError('Account is not empty') |
280 |
self.permissions.group_destroy(account)
|
281 |
|
282 |
@backend_method
|
283 |
def list_containers(self, user, account, marker=None, limit=10000, shared=False, until=None): |
284 |
"""Return a list of containers existing under an account."""
|
285 |
|
286 |
logger.debug("list_containers: %s %s %s %s %s", account, marker, limit, shared, until)
|
287 |
if user != account:
|
288 |
if until or account not in self._allowed_accounts(user): |
289 |
raise NotAllowedError
|
290 |
allowed = self._allowed_containers(user, account)
|
291 |
start, limit = self._list_limits(allowed, marker, limit)
|
292 |
return allowed[start:start + limit]
|
293 |
if shared:
|
294 |
allowed = [x.split('/', 2)[1] for x in self.permissions.access_list_shared(account)] |
295 |
allowed = list(set(allowed)) |
296 |
start, limit = self._list_limits(allowed, marker, limit)
|
297 |
return allowed[start:start + limit]
|
298 |
node = self.node.node_lookup(account)
|
299 |
return [x[0] for x in self._list_object_properties(node, account, '', '/', marker, limit, False, None, [], until)] |
300 |
|
    @backend_method
    def list_container_meta(self, user, account, container, domain, until=None):
        """Return a list with all the container's object meta keys for the domain."""

        logger.debug("list_container_meta: %s %s %s %s", account, container, domain, until)
        allowed = []
        if user != account:
            if until:
                raise NotAllowedError
            allowed = self.permissions.access_list_paths(user, '/'.join((account, container)))
            if not allowed:
                raise NotAllowedError
        path, node = self._lookup_container(account, container)
        before = until if until is not None else inf
        allowed = self._get_formatted_paths(allowed)
        return self.node.latest_attribute_keys(node, domain, before, CLUSTER_DELETED, allowed)

    @backend_method
    def get_container_meta(self, user, account, container, domain, until=None):
        """Return a dictionary with the container metadata for the domain."""

        logger.debug("get_container_meta: %s %s %s %s", account, container, domain, until)
        if user != account:
            if until or container not in self._allowed_containers(user, account):
                raise NotAllowedError
        path, node = self._lookup_container(account, container)
        props = self._get_properties(node, until)
        mtime = props[self.MTIME]
        count, bytes, tstamp = self._get_statistics(node, until)
        tstamp = max(tstamp, mtime)
        if until is None:
            modified = tstamp
        else:
            modified = self._get_statistics(node)[2] # Overall last modification.
            modified = max(modified, mtime)

        if user != account:
            meta = {'name': container}
        else:
            meta = dict(self.node.attribute_get(props[self.SERIAL], domain))
            if until is not None:
                meta.update({'until_timestamp': tstamp})
            meta.update({'name': container, 'count': count, 'bytes': bytes})
        meta.update({'modified': modified})
        return meta

    @backend_method
    def update_container_meta(self, user, account, container, domain, meta, replace=False):
        """Update the metadata associated with the container for the domain."""

        logger.debug("update_container_meta: %s %s %s %s %s", account, container, domain, meta, replace)
        if user != account:
            raise NotAllowedError
        path, node = self._lookup_container(account, container)
        self._put_metadata(user, node, domain, meta, replace)

    @backend_method
    def get_container_policy(self, user, account, container):
        """Return a dictionary with the container policy."""

        logger.debug("get_container_policy: %s %s", account, container)
        if user != account:
            if container not in self._allowed_containers(user, account):
                raise NotAllowedError
            return {}
        path, node = self._lookup_container(account, container)
        return self._get_policy(node)

    @backend_method
    def update_container_policy(self, user, account, container, policy, replace=False):
        """Update the policy associated with the container."""

        logger.debug("update_container_policy: %s %s %s %s", account, container, policy, replace)
        if user != account:
            raise NotAllowedError
        path, node = self._lookup_container(account, container)
        self._check_policy(policy)
        self._put_policy(node, policy, replace)

    @backend_method
    def put_container(self, user, account, container, policy={}):
        """Create a new container with the given name."""

        logger.debug("put_container: %s %s %s", account, container, policy)
        if user != account:
            raise NotAllowedError
        try:
            path, node = self._lookup_container(account, container)
        except NameError:
            pass
        else:
            raise NameError('Container already exists')
        if policy:
            self._check_policy(policy)
        path = '/'.join((account, container))
        node = self._put_path(user, self._lookup_account(account, True)[1], path)
        self._put_policy(node, policy, True)

    @backend_method
    def delete_container(self, user, account, container, until=None):
        """Delete/purge the container with the given name."""

        logger.debug("delete_container: %s %s %s", account, container, until)
        if user != account:
            raise NotAllowedError
        path, node = self._lookup_container(account, container)

        if until is not None:
            hashes, size = self.node.node_purge_children(node, until, CLUSTER_HISTORY)
            for h in hashes:
                self.store.map_delete(h)
            self.node.node_purge_children(node, until, CLUSTER_DELETED)
            self._report_size_change(user, account, -size, {'action': 'container purge'})
            return

        if self._get_statistics(node)[0] > 0:
            raise IndexError('Container is not empty')
        hashes, size = self.node.node_purge_children(node, inf, CLUSTER_HISTORY)
        for h in hashes:
            self.store.map_delete(h)
        self.node.node_purge_children(node, inf, CLUSTER_DELETED)
        self.node.node_remove(node)
        self._report_size_change(user, account, -size, {'action': 'container delete'})

    def _list_objects(self, user, account, container, prefix, delimiter, marker, limit, virtual, domain, keys, shared, until, size_range, all_props):
        if user != account and until:
            raise NotAllowedError
        allowed = self._list_object_permissions(user, account, container, prefix, shared)
        path, node = self._lookup_container(account, container)
        allowed = self._get_formatted_paths(allowed)
        return self._list_object_properties(node, path, prefix, delimiter, marker, limit, virtual, domain, keys, until, size_range, allowed, all_props)

    def _list_object_permissions(self, user, account, container, prefix, shared):
        allowed = []
        if user != account:
            allowed = self.permissions.access_list_paths(user, '/'.join((account, container, prefix)))
            if not allowed:
                raise NotAllowedError
        else:
            if shared:
                allowed = self.permissions.access_list_shared('/'.join((account, container, prefix)))
                if not allowed:
                    return []
        return allowed

    @backend_method
    def list_objects(self, user, account, container, prefix='', delimiter=None, marker=None, limit=10000, virtual=True, domain=None, keys=[], shared=False, until=None, size_range=None):
        """Return a list of object (name, version_id) tuples existing under a container."""

        logger.debug("list_objects: %s %s %s %s %s %s %s %s %s %s %s %s", account, container, prefix, delimiter, marker, limit, virtual, domain, keys, shared, until, size_range)
        return self._list_objects(user, account, container, prefix, delimiter, marker, limit, virtual, domain, keys, shared, until, size_range, False)

    @backend_method
    def list_object_meta(self, user, account, container, prefix='', delimiter=None, marker=None, limit=10000, virtual=True, domain=None, keys=[], shared=False, until=None, size_range=None):
        """Return a list of object metadata dicts existing under a container."""

        logger.debug("list_object_meta: %s %s %s %s %s %s %s %s %s %s %s %s", account, container, prefix, delimiter, marker, limit, virtual, domain, keys, shared, until, size_range)
        props = self._list_objects(user, account, container, prefix, delimiter, marker, limit, virtual, domain, keys, shared, until, size_range, True)
        objects = []
        for p in props:
            if len(p) == 2:
                objects.append({'subdir': p[0]})
            else:
                objects.append({'name': p[0],
                                'bytes': p[self.SIZE + 1],
                                'type': p[self.TYPE + 1],
                                'hash': p[self.HASH + 1],
                                'version': p[self.SERIAL + 1],
                                'version_timestamp': p[self.MTIME + 1],
                                'modified': p[self.MTIME + 1] if until is None else None,
                                'modified_by': p[self.MUSER + 1],
                                'uuid': p[self.UUID + 1],
                                'checksum': p[self.CHECKSUM + 1]})
        return objects

    @backend_method
    def list_object_permissions(self, user, account, container, prefix=''):
        """Return a list of paths that enforce permissions under a container."""

        logger.debug("list_object_permissions: %s %s %s", account, container, prefix)
        return self._list_object_permissions(user, account, container, prefix, True)

    @backend_method
    def list_object_public(self, user, account, container, prefix=''):
        """Return a dict mapping paths to public ids for objects that are public under a container."""

        logger.debug("list_object_public: %s %s %s", account, container, prefix)
        public = {}
        for path, p in self.permissions.public_list('/'.join((account, container, prefix))):
            public[path] = p + ULTIMATE_ANSWER
        return public

    @backend_method
    def get_object_meta(self, user, account, container, name, domain, version=None):
        """Return a dictionary with the object metadata for the domain."""

        logger.debug("get_object_meta: %s %s %s %s %s", account, container, name, domain, version)
        self._can_read(user, account, container, name)
        path, node = self._lookup_object(account, container, name)
        props = self._get_version(node, version)
        if version is None:
            modified = props[self.MTIME]
        else:
            try:
                modified = self._get_version(node)[self.MTIME] # Overall last modification.
            except NameError: # Object may be deleted.
                del_props = self.node.version_lookup(node, inf, CLUSTER_DELETED)
                if del_props is None:
                    raise NameError('Object does not exist')
                modified = del_props[self.MTIME]

        meta = dict(self.node.attribute_get(props[self.SERIAL], domain))
        meta.update({'name': name,
                     'bytes': props[self.SIZE],
                     'type': props[self.TYPE],
                     'hash': props[self.HASH],
                     'version': props[self.SERIAL],
                     'version_timestamp': props[self.MTIME],
                     'modified': modified,
                     'modified_by': props[self.MUSER],
                     'uuid': props[self.UUID],
                     'checksum': props[self.CHECKSUM]})
        return meta

    @backend_method
    def update_object_meta(self, user, account, container, name, domain, meta, replace=False):
        """Update the metadata associated with the object for the domain and return the new version."""

        logger.debug("update_object_meta: %s %s %s %s %s %s", account, container, name, domain, meta, replace)
        self._can_write(user, account, container, name)
        path, node = self._lookup_object(account, container, name)
        src_version_id, dest_version_id = self._put_metadata(user, node, domain, meta, replace)
        self._apply_versioning(account, container, src_version_id)
        return dest_version_id

    @backend_method
    def get_object_permissions(self, user, account, container, name):
        """Return the action allowed on the object, the path
        from which the object gets its permissions,
        along with a dictionary containing the permissions."""

        logger.debug("get_object_permissions: %s %s %s", account, container, name)
        allowed = 'write'
        permissions_path = self._get_permissions_path(account, container, name)
        if user != account:
            if self.permissions.access_check(permissions_path, self.WRITE, user):
                allowed = 'write'
            elif self.permissions.access_check(permissions_path, self.READ, user):
                allowed = 'read'
            else:
                raise NotAllowedError
        self._lookup_object(account, container, name)
        return (allowed, permissions_path, self.permissions.access_get(permissions_path))

    @backend_method
    def update_object_permissions(self, user, account, container, name, permissions):
        """Update the permissions associated with the object."""

        logger.debug("update_object_permissions: %s %s %s %s", account, container, name, permissions)
        if user != account:
            raise NotAllowedError
        path = self._lookup_object(account, container, name)[0]
        self._check_permissions(path, permissions)
        self.permissions.access_set(path, permissions)

    @backend_method
    def get_object_public(self, user, account, container, name):
        """Return the public id of the object if applicable."""

        logger.debug("get_object_public: %s %s %s", account, container, name)
        self._can_read(user, account, container, name)
        path = self._lookup_object(account, container, name)[0]
        p = self.permissions.public_get(path)
        if p is not None:
            p += ULTIMATE_ANSWER
        return p

    @backend_method
    def update_object_public(self, user, account, container, name, public):
        """Update the public status of the object."""

        logger.debug("update_object_public: %s %s %s %s", account, container, name, public)
        self._can_write(user, account, container, name)
        path = self._lookup_object(account, container, name)[0]
        if not public:
            self.permissions.public_unset(path)
        else:
            self.permissions.public_set(path)

    @backend_method
    def get_object_hashmap(self, user, account, container, name, version=None):
        """Return the object's size and a list with partial hashes."""

        logger.debug("get_object_hashmap: %s %s %s %s", account, container, name, version)
        self._can_read(user, account, container, name)
        path, node = self._lookup_object(account, container, name)
        props = self._get_version(node, version)
        hashmap = self.store.map_get(binascii.unhexlify(props[self.HASH]))
        return props[self.SIZE], [binascii.hexlify(x) for x in hashmap]

    def _update_object_hash(self, user, account, container, name, size, type, hash, checksum, permissions, src_node=None, is_copy=False):
        if permissions is not None and user != account:
            raise NotAllowedError
        self._can_write(user, account, container, name)
        if permissions is not None:
            path = '/'.join((account, container, name))
            self._check_permissions(path, permissions)

        account_path, account_node = self._lookup_account(account, True)
        container_path, container_node = self._lookup_container(account, container)
        path, node = self._put_object_node(container_path, container_node, name)
        pre_version_id, dest_version_id = self._put_version_duplicate(user, node, src_node=src_node, size=size, type=type, hash=hash, checksum=checksum, is_copy=is_copy)

        # Check quota.
        del_size = self._apply_versioning(account, container, pre_version_id)
        size_delta = size - del_size
        if size_delta > 0:
            account_quota = long(self._get_policy(account_node)['quota'])
            container_quota = long(self._get_policy(container_node)['quota'])
            if (account_quota > 0 and self._get_statistics(account_node)[1] + size_delta > account_quota) or \
               (container_quota > 0 and self._get_statistics(container_node)[1] + size_delta > container_quota):
                # This must be executed in a transaction, so the version is never created if it fails.
                raise QuotaError
        self._report_size_change(user, account, size_delta, {'action': 'object update'})

        if permissions is not None:
            self.permissions.access_set(path, permissions)
        return pre_version_id, dest_version_id

    @backend_method
    def update_object_hashmap(self, user, account, container, name, size, type, hashmap, checksum, domain, meta={}, replace_meta=False, permissions=None):
        """Create/update an object with the specified size and partial hashes."""

        logger.debug("update_object_hashmap: %s %s %s %s %s %s %s", account, container, name, size, type, hashmap, checksum)
        if size == 0: # No such thing as an empty hashmap.
            hashmap = [self.put_block('')]
        map = HashMap(self.block_size, self.hash_algorithm)
        map.extend([binascii.unhexlify(x) for x in hashmap])
        missing = self.store.block_search(map)
        if missing:
            ie = IndexError()
            ie.data = [binascii.hexlify(x) for x in missing]
            raise ie

        hash = map.hash()
        pre_version_id, dest_version_id = self._update_object_hash(user, account, container, name, size, type, binascii.hexlify(hash), checksum, permissions)
        self._put_metadata_duplicate(pre_version_id, dest_version_id, domain, meta, replace_meta)
        self.store.map_put(hash, map)
        return dest_version_id

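    # Illustrative note on the upload protocol defined by the methods above:
    # a client calls update_object_hashmap() with the hex hashes of its
    # blocks; if some blocks are not yet in the store, the call raises
    # IndexError with a `data` attribute listing the missing hex hashes, so
    # the client uploads those via put_block()/update_block() and retries.
    # A hedged sketch (`backend`, `blocks` and the 'pithos' domain are
    # illustrative, not part of this module):
    #
    #   try:
    #       version = backend.update_object_hashmap(user, account, container, name,
    #                                               size, 'application/octet-stream',
    #                                               hex_hashes, None, 'pithos')
    #   except IndexError, ie:
    #       for h in ie.data:  # Hex hashes of blocks the store lacks.
    #           backend.put_block(blocks[h])
    #       version = backend.update_object_hashmap(user, account, container, name,
    #                                               size, 'application/octet-stream',
    #                                               hex_hashes, None, 'pithos')
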
    @backend_method
    def update_object_checksum(self, user, account, container, name, version, checksum):
        """Update an object's checksum."""

        logger.debug("update_object_checksum: %s %s %s %s %s", account, container, name, version, checksum)
        # Update objects with greater version and same hashmap and size (fix metadata updates).
        self._can_write(user, account, container, name)
        path, node = self._lookup_object(account, container, name)
        props = self._get_version(node, version)
        versions = self.node.node_get_versions(node)
        for x in versions:
            if x[self.SERIAL] >= int(version) and x[self.HASH] == props[self.HASH] and x[self.SIZE] == props[self.SIZE]:
                self.node.version_put_property(x[self.SERIAL], 'checksum', checksum)

    def _copy_object(self, user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, dest_domain=None, dest_meta={}, replace_meta=False, permissions=None, src_version=None, is_move=False):
        self._can_read(user, src_account, src_container, src_name)
        path, node = self._lookup_object(src_account, src_container, src_name)
        # TODO: Will do another fetch of the properties in duplicate version...
        props = self._get_version(node, src_version) # Check to see if source exists.
        src_version_id = props[self.SERIAL]
        hash = props[self.HASH]
        size = props[self.SIZE]

        is_copy = not is_move and (src_account, src_container, src_name) != (dest_account, dest_container, dest_name) # New uuid.
        pre_version_id, dest_version_id = self._update_object_hash(user, dest_account, dest_container, dest_name, size, type, hash, None, permissions, src_node=node, is_copy=is_copy)
        self._put_metadata_duplicate(src_version_id, dest_version_id, dest_domain, dest_meta, replace_meta)
        return dest_version_id

    @backend_method
    def copy_object(self, user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta={}, replace_meta=False, permissions=None, src_version=None):
        """Copy an object's data and metadata."""

        logger.debug("copy_object: %s %s %s %s %s %s %s %s %s %s %s %s", src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta, replace_meta, permissions, src_version)
        dest_version_id = self._copy_object(user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta, replace_meta, permissions, src_version, False)
        return dest_version_id

    @backend_method
    def move_object(self, user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta={}, replace_meta=False, permissions=None):
        """Move an object's data and metadata."""

        logger.debug("move_object: %s %s %s %s %s %s %s %s %s %s %s", src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta, replace_meta, permissions)
        if user != src_account:
            raise NotAllowedError
        dest_version_id = self._copy_object(user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta, replace_meta, permissions, None, True)
        if (src_account, src_container, src_name) != (dest_account, dest_container, dest_name):
            self._delete_object(user, src_account, src_container, src_name)
        return dest_version_id

    def _delete_object(self, user, account, container, name, until=None):
        if user != account:
            raise NotAllowedError

        if until is not None:
            path = '/'.join((account, container, name))
            node = self.node.node_lookup(path)
            if node is None:
                return
            hashes = []
            size = 0
            h, s = self.node.node_purge(node, until, CLUSTER_NORMAL)
            hashes += h
            size += s
            h, s = self.node.node_purge(node, until, CLUSTER_HISTORY)
            hashes += h
            size += s
            for h in hashes:
                self.store.map_delete(h)
            self.node.node_purge(node, until, CLUSTER_DELETED)
            try:
                props = self._get_version(node)
            except NameError:
                self.permissions.access_clear(path)
            self._report_size_change(user, account, -size, {'action': 'object purge'})
            return

        path, node = self._lookup_object(account, container, name)
        src_version_id, dest_version_id = self._put_version_duplicate(user, node, size=0, type='', hash=None, checksum='', cluster=CLUSTER_DELETED)
        del_size = self._apply_versioning(account, container, src_version_id)
        if del_size:
            self._report_size_change(user, account, -del_size, {'action': 'object delete'})
        self.permissions.access_clear(path)

    @backend_method
    def delete_object(self, user, account, container, name, until=None):
        """Delete/purge an object."""

        logger.debug("delete_object: %s %s %s %s", account, container, name, until)
        self._delete_object(user, account, container, name, until)

    @backend_method
    def list_versions(self, user, account, container, name):
        """Return a list of all (version, version_timestamp) tuples for an object."""

        logger.debug("list_versions: %s %s %s", account, container, name)
        self._can_read(user, account, container, name)
        path, node = self._lookup_object(account, container, name)
        versions = self.node.node_get_versions(node)
        return [[x[self.SERIAL], x[self.MTIME]] for x in versions if x[self.CLUSTER] != CLUSTER_DELETED]

    @backend_method
    def get_uuid(self, user, uuid):
        """Return the (account, container, name) for the UUID given."""

        logger.debug("get_uuid: %s", uuid)
        info = self.node.latest_uuid(uuid)
        if info is None:
            raise NameError
        path, serial = info
        account, container, name = path.split('/', 2)
        self._can_read(user, account, container, name)
        return (account, container, name)

    @backend_method
    def get_public(self, user, public):
        """Return the (account, container, name) for the public id given."""

        logger.debug("get_public: %s", public)
        if public is None or public < ULTIMATE_ANSWER:
            raise NameError
        path = self.permissions.public_path(public - ULTIMATE_ANSWER)
        if path is None:
            raise NameError
        account, container, name = path.split('/', 2)
        self._can_read(user, account, container, name)
        return (account, container, name)

    @backend_method(autocommit=0)
    def get_block(self, hash):
        """Return a block's data."""

        logger.debug("get_block: %s", hash)
        block = self.store.block_get(binascii.unhexlify(hash))
        if not block:
            raise NameError('Block does not exist')
        return block

    @backend_method(autocommit=0)
    def put_block(self, data):
        """Store a block and return the hash."""

        logger.debug("put_block: %s", len(data))
        return binascii.hexlify(self.store.block_put(data))

    @backend_method(autocommit=0)
    def update_block(self, hash, data, offset=0):
        """Update a known block and return the hash."""

        logger.debug("update_block: %s %s %s", hash, len(data), offset)
        if offset == 0 and len(data) == self.block_size:
            return self.put_block(data)
        h = self.store.block_update(binascii.unhexlify(hash), offset, data)
        return binascii.hexlify(h)

    # Path functions.

    def _generate_uuid(self):
        return str(uuidlib.uuid4())

    def _put_object_node(self, path, parent, name):
        path = '/'.join((path, name))
        node = self.node.node_lookup(path)
        if node is None:
            node = self.node.node_create(parent, path)
        return path, node

    def _put_path(self, user, parent, path):
        node = self.node.node_create(parent, path)
        self.node.version_create(node, None, 0, '', None, user, self._generate_uuid(), '', CLUSTER_NORMAL)
        return node

    def _lookup_account(self, account, create=True):
        node = self.node.node_lookup(account)
        if node is None and create:
            node = self._put_path(account, self.ROOTNODE, account) # User is account.
        return account, node

    def _lookup_container(self, account, container):
        path = '/'.join((account, container))
        node = self.node.node_lookup(path)
        if node is None:
            raise NameError('Container does not exist')
        return path, node

    def _lookup_object(self, account, container, name):
        path = '/'.join((account, container, name))
        node = self.node.node_lookup(path)
        if node is None:
            raise NameError('Object does not exist')
        return path, node

    def _get_properties(self, node, until=None):
        """Return properties until the timestamp given."""

        before = until if until is not None else inf
        props = self.node.version_lookup(node, before, CLUSTER_NORMAL)
        if props is None and until is not None:
            props = self.node.version_lookup(node, before, CLUSTER_HISTORY)
        if props is None:
            raise NameError('Path does not exist')
        return props

    def _get_statistics(self, node, until=None):
        """Return count, sum of size and latest timestamp of everything under node."""

        if until is None:
            stats = self.node.statistics_get(node, CLUSTER_NORMAL)
        else:
            stats = self.node.statistics_latest(node, until, CLUSTER_DELETED)
        if stats is None:
            stats = (0, 0, 0)
        return stats

    def _get_version(self, node, version=None):
        if version is None:
            props = self.node.version_lookup(node, inf, CLUSTER_NORMAL)
            if props is None:
                raise NameError('Object does not exist')
        else:
            try:
                version = int(version)
            except ValueError:
                raise IndexError('Version does not exist')
            props = self.node.version_get_properties(version)
            if props is None or props[self.CLUSTER] == CLUSTER_DELETED:
                raise IndexError('Version does not exist')
        return props

    def _put_version_duplicate(self, user, node, src_node=None, size=None, type=None, hash=None, checksum=None, cluster=CLUSTER_NORMAL, is_copy=False):
        """Create a new version of the node."""

        props = self.node.version_lookup(node if src_node is None else src_node, inf, CLUSTER_NORMAL)
        if props is not None:
            src_version_id = props[self.SERIAL]
            src_hash = props[self.HASH]
            src_size = props[self.SIZE]
            src_type = props[self.TYPE]
            src_checksum = props[self.CHECKSUM]
        else:
            src_version_id = None
            src_hash = None
            src_size = 0
            src_type = ''
            src_checksum = ''
        if size is None: # Set metadata.
            hash = src_hash # This way hash can be set to None (account or container).
            size = src_size
        if type is None:
            type = src_type
        if checksum is None:
            checksum = src_checksum
        uuid = self._generate_uuid() if (is_copy or src_version_id is None) else props[self.UUID]

        if src_node is None:
            pre_version_id = src_version_id
        else:
            pre_version_id = None
            props = self.node.version_lookup(node, inf, CLUSTER_NORMAL)
            if props is not None:
                pre_version_id = props[self.SERIAL]
        if pre_version_id is not None:
            self.node.version_recluster(pre_version_id, CLUSTER_HISTORY)

        dest_version_id, mtime = self.node.version_create(node, hash, size, type, src_version_id, user, uuid, checksum, cluster)
        return pre_version_id, dest_version_id

    def _put_metadata_duplicate(self, src_version_id, dest_version_id, domain, meta, replace=False):
        if src_version_id is not None:
            self.node.attribute_copy(src_version_id, dest_version_id)
        if not replace:
            self.node.attribute_del(dest_version_id, domain, (k for k, v in meta.iteritems() if v == ''))
            self.node.attribute_set(dest_version_id, domain, ((k, v) for k, v in meta.iteritems() if v != ''))
        else:
            self.node.attribute_del(dest_version_id, domain)
            self.node.attribute_set(dest_version_id, domain, ((k, v) for k, v in meta.iteritems()))

    def _put_metadata(self, user, node, domain, meta, replace=False):
        """Create a new version and store metadata."""

        src_version_id, dest_version_id = self._put_version_duplicate(user, node)
        self._put_metadata_duplicate(src_version_id, dest_version_id, domain, meta, replace)
        return src_version_id, dest_version_id

    def _list_limits(self, listing, marker, limit):
        start = 0
        if marker:
            try:
                start = listing.index(marker) + 1
            except ValueError:
                pass
        if not limit or limit > 10000:
            limit = 10000
        return start, limit

    def _list_object_properties(self, parent, path, prefix='', delimiter=None, marker=None, limit=10000, virtual=True, domain=None, keys=[], until=None, size_range=None, allowed=[], all_props=False):
        cont_prefix = path + '/'
        prefix = cont_prefix + prefix
        start = cont_prefix + marker if marker else None
        before = until if until is not None else inf
        filterq = keys if domain else []
        sizeq = size_range

        objects, prefixes = self.node.latest_version_list(parent, prefix, delimiter, start, limit, before, CLUSTER_DELETED, allowed, domain, filterq, sizeq, all_props)
        objects.extend([(p, None) for p in prefixes] if virtual else [])
        objects.sort(key=lambda x: x[0])
        objects = [(x[0][len(cont_prefix):],) + x[1:] for x in objects]

        start, limit = self._list_limits([x[0] for x in objects], marker, limit)
        return objects[start:start + limit]

    # Reporting functions.

    def _report_size_change(self, user, account, size, details={}):
        logger.debug("_report_size_change: %s %s %s %s", user, account, size, details)
        account_node = self._lookup_account(account, True)[1]
        total = self._get_statistics(account_node)[1]
        details.update({'user': user, 'total': total})
        self.queue.send(account, 'diskspace', size, details)

    # Policy functions.

    def _check_policy(self, policy):
        for k in policy.keys():
            if policy[k] == '':
                policy[k] = self.default_policy.get(k)
        for k, v in policy.iteritems():
            if k == 'quota':
                q = int(v) # May raise ValueError.
                if q < 0:
                    raise ValueError
            elif k == 'versioning':
                if v not in ['auto', 'none']:
                    raise ValueError
            else:
                raise ValueError

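    # For example, {'quota': '1073741824', 'versioning': 'auto'} passes the
    # check above (quota is parsed with int() and must not be negative, and
    # versioning must be 'auto' or 'none'); any other key raises ValueError,
    # and empty string values fall back to the defaults.
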
    def _put_policy(self, node, policy, replace):
        if replace:
            for k, v in self.default_policy.iteritems():
                if k not in policy:
                    policy[k] = v
        self.node.policy_set(node, policy)

    def _get_policy(self, node):
        policy = self.default_policy.copy()
        policy.update(self.node.policy_get(node))
        return policy

    def _apply_versioning(self, account, container, version_id):
        """Delete the provided version if such is the policy.
           Return size of object removed.
        """

        if version_id is None:
            return 0
        path, node = self._lookup_container(account, container)
        versioning = self._get_policy(node)['versioning']
        if versioning != 'auto':
            hash, size = self.node.version_remove(version_id)
            self.store.map_delete(hash)
            return size
        return 0

    # Access control functions.

    def _check_groups(self, groups):
        # raise ValueError('Bad characters in groups')
        pass

    def _check_permissions(self, path, permissions):
        # raise ValueError('Bad characters in permissions')
        pass

    def _get_formatted_paths(self, paths):
        formatted = []
        for p in paths:
            node = self.node.node_lookup(p)
            if node is not None:
                props = self.node.version_lookup(node, inf, CLUSTER_NORMAL)
                if props is not None:
                    if props[self.TYPE] in ('application/directory', 'application/folder'):
                        formatted.append((p.rstrip('/') + '/', self.MATCH_PREFIX))
                    formatted.append((p, self.MATCH_EXACT))
        return formatted

    def _get_permissions_path(self, account, container, name):
        path = '/'.join((account, container, name))
        permission_paths = self.permissions.access_inherit(path)
        permission_paths.sort()
        permission_paths.reverse()
        for p in permission_paths:
            if p == path:
                return p
            else:
                if p.count('/') < 2:
                    continue
                node = self.node.node_lookup(p)
                if node is not None:
                    props = self.node.version_lookup(node, inf, CLUSTER_NORMAL)
                    if props is not None:
                        if props[self.TYPE] in ('application/directory', 'application/folder'):
                            return p
        return None

    def _can_read(self, user, account, container, name):
        if user == account:
            return True
        path = '/'.join((account, container, name))
        if self.permissions.public_get(path) is not None:
            return True
        path = self._get_permissions_path(account, container, name)
        if not path:
            raise NotAllowedError
        if not self.permissions.access_check(path, self.READ, user) and not self.permissions.access_check(path, self.WRITE, user):
            raise NotAllowedError

    def _can_write(self, user, account, container, name):
        if user == account:
            return True
        path = '/'.join((account, container, name))
        path = self._get_permissions_path(account, container, name)
        if not path:
            raise NotAllowedError
        if not self.permissions.access_check(path, self.WRITE, user):
            raise NotAllowedError

    def _allowed_accounts(self, user):
        allow = set()
        for path in self.permissions.access_list_paths(user):
            allow.add(path.split('/', 1)[0])
        return sorted(allow)

    def _allowed_containers(self, user, account):
        allow = set()
        for path in self.permissions.access_list_paths(user, account):
            allow.add(path.split('/', 2)[1])
        return sorted(allow)
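

if __name__ == '__main__':
    # Minimal usage sketch, not part of the backend proper: it assumes the
    # default sqlalchemy and hashfiler modules are importable and that
    # 'backend.db' and 'data/' are writable; the account, container and
    # metadata domain names are illustrative.
    backend = ModularBackend()
    user = 'user@example.com'
    try:
        try:
            backend.put_account(user, user)
        except NameError:
            pass  # Account already exists from a previous run.
        try:
            backend.put_container(user, user, 'pithos')
        except NameError:
            pass  # Container already exists from a previous run.
        print backend.list_containers(user, user)
        print backend.get_account_meta(user, user, 'pithos')
    finally:
        backend.close()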