Revision a9b3f29d
b/pithos/backends/__init__.py | ||
---|---|---|
34 | 34 |
from django.conf import settings |
35 | 35 |
|
36 | 36 |
from simple import SimpleBackend |
37 |
from modular import ModularBackend |
|
37 | 38 |
|
38 | 39 |
backend = None |
39 | 40 |
options = getattr(settings, 'BACKEND', None) |
b/pithos/backends/base.py | ||
---|---|---|
102 | 102 |
""" |
103 | 103 |
return |
104 | 104 |
|
105 |
def put_account(self, user, account):
    """Create a new account with the given name.

    This base implementation does nothing and returns None;
    it only documents the operation's contract.

    Raises:
        NotAllowedError: Operation not permitted
    """
    return
|
112 |
|
|
105 | 113 |
def delete_account(self, user, account): |
106 | 114 |
"""Delete the account with the given name. |
107 | 115 |
|
b/pithos/backends/lib/dbworker.py | ||
---|---|---|
1 |
# Copyright 2011 GRNET S.A. All rights reserved. |
|
2 |
# |
|
3 |
# Redistribution and use in source and binary forms, with or |
|
4 |
# without modification, are permitted provided that the following |
|
5 |
# conditions are met: |
|
6 |
# |
|
7 |
# 1. Redistributions of source code must retain the above |
|
8 |
# copyright notice, this list of conditions and the following |
|
9 |
# disclaimer. |
|
10 |
# |
|
11 |
# 2. Redistributions in binary form must reproduce the above |
|
12 |
# copyright notice, this list of conditions and the following |
|
13 |
# disclaimer in the documentation and/or other materials |
|
14 |
# provided with the distribution. |
|
15 |
# |
|
16 |
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS |
|
17 |
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
|
18 |
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|
19 |
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR |
|
20 |
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
|
21 |
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
|
22 |
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF |
|
23 |
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED |
|
24 |
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
|
25 |
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN |
|
26 |
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
|
27 |
# POSSIBILITY OF SUCH DAMAGE. |
|
28 |
# |
|
29 |
# The views and conclusions contained in the software and |
|
30 |
# documentation are those of the authors and should not be |
|
31 |
# interpreted as representing official policies, either expressed |
|
32 |
# or implied, of GRNET S.A. |
|
33 |
|
|
34 |
|
|
35 |
class DBWorker(object):
    """Thin wrapper around a DB-API connection/cursor pair.

    Expects 'connection' and 'cursor' keyword parameters and exposes
    the cursor's query primitives directly as instance attributes, so
    subclasses can call self.execute(...) / self.fetchall() without an
    extra attribute hop.
    """

    def __init__(self, **params):
        self.params = params
        connection = params['connection']
        cursor = params['cursor']
        # Re-export the cursor primitives as bound instance attributes.
        for primitive in ('execute', 'executemany', 'fetchone', 'fetchall'):
            setattr(self, primitive, getattr(cursor, primitive))
        self.cur = cursor
        self.conn = connection
b/pithos/backends/lib/groups.py | ||
---|---|---|
1 |
# Copyright 2011 GRNET S.A. All rights reserved. |
|
2 |
# |
|
3 |
# Redistribution and use in source and binary forms, with or |
|
4 |
# without modification, are permitted provided that the following |
|
5 |
# conditions are met: |
|
6 |
# |
|
7 |
# 1. Redistributions of source code must retain the above |
|
8 |
# copyright notice, this list of conditions and the following |
|
9 |
# disclaimer. |
|
10 |
# |
|
11 |
# 2. Redistributions in binary form must reproduce the above |
|
12 |
# copyright notice, this list of conditions and the following |
|
13 |
# disclaimer in the documentation and/or other materials |
|
14 |
# provided with the distribution. |
|
15 |
# |
|
16 |
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS |
|
17 |
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
|
18 |
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|
19 |
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR |
|
20 |
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
|
21 |
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
|
22 |
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF |
|
23 |
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED |
|
24 |
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
|
25 |
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN |
|
26 |
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
|
27 |
# POSSIBILITY OF SUCH DAMAGE. |
|
28 |
# |
|
29 |
# The views and conclusions contained in the software and |
|
30 |
# documentation are those of the authors and should not be |
|
31 |
# interpreted as representing official policies, either expressed |
|
32 |
# or implied, of GRNET S.A. |
|
33 |
|
|
34 |
from dbworker import DBWorker |
|
35 |
|
|
36 |
|
|
37 |
class Groups(DBWorker):
    """Groups are named collections of members, belonging to an owner.

    Backed by a single 'groups' table holding one row per
    (owner, name, member) triple; duplicate inserts are ignored.
    """

    def __init__(self, **params):
        DBWorker.__init__(self, **params)
        execute = self.execute

        execute(""" create table if not exists groups
                    ( owner text,
                      name text,
                      member text,
                      primary key (owner, name, member) ) """)
        execute(""" create index if not exists idx_groups_member
                    on groups(member) """)

    def group_names(self, owner):
        """List all group names belonging to owner."""

        q = "select distinct name from groups where owner = ?"
        self.execute(q, (owner,))
        return [r[0] for r in self.fetchall()]

    def group_list(self, owner):
        """List all (group, member) tuples belonging to owner."""

        q = "select name, member from groups where owner = ?"
        self.execute(q, (owner,))
        return self.fetchall()

    def group_add(self, owner, group, member):
        """Add a member to a group."""

        q = "insert or ignore into groups (owner, name, member) values (?, ?, ?)"
        self.execute(q, (owner, group, member))

    def group_addmany(self, owner, group, members):
        """Add members to a group."""

        q = "insert or ignore into groups (owner, name, member) values (?, ?, ?)"
        self.executemany(q, ((owner, group, member) for member in members))

    def group_remove(self, owner, group, member):
        """Remove a member from a group."""

        q = "delete from groups where owner = ? and name = ? and member = ?"
        self.execute(q, (owner, group, member))

    def group_delete(self, owner, group):
        """Delete a group."""

        q = "delete from groups where owner = ? and name = ?"
        self.execute(q, (owner, group))

    def group_destroy(self, owner):
        """Delete all groups belonging to owner."""

        q = "delete from groups where owner = ?"
        self.execute(q, (owner,))

    def group_members(self, owner, group):
        """Return the list of members of a group."""

        q = "select member from groups where owner = ? and name = ?"
        self.execute(q, (owner, group))
        return [r[0] for r in self.fetchall()]

    def group_check(self, owner, group, member):
        """Check if a member is in a group."""

        q = "select 1 from groups where owner = ? and name = ? and member = ?"
        # Bug fix: the query has three placeholders (owner, name, member)
        # but only (group, member) was bound, which raises a parameter
        # binding error at runtime and also ignored the owner.
        self.execute(q, (owner, group, member))
        return bool(self.fetchone())

    def group_parents(self, member):
        """Return all (owner, group) tuples that contain member."""

        q = "select owner, name from groups where member = ?"
        self.execute(q, (member,))
        return self.fetchall()
b/pithos/backends/lib/hashfiler/__init__.py | ||
---|---|---|
1 |
# Copyright 2011 GRNET S.A. All rights reserved. |
|
2 |
# |
|
3 |
# Redistribution and use in source and binary forms, with or |
|
4 |
# without modification, are permitted provided that the following |
|
5 |
# conditions are met: |
|
6 |
# |
|
7 |
# 1. Redistributions of source code must retain the above |
|
8 |
# copyright notice, this list of conditions and the following |
|
9 |
# disclaimer. |
|
10 |
# |
|
11 |
# 2. Redistributions in binary form must reproduce the above |
|
12 |
# copyright notice, this list of conditions and the following |
|
13 |
# disclaimer in the documentation and/or other materials |
|
14 |
# provided with the distribution. |
|
15 |
# |
|
16 |
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS |
|
17 |
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
|
18 |
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|
19 |
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR |
|
20 |
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
|
21 |
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
|
22 |
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF |
|
23 |
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED |
|
24 |
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
|
25 |
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN |
|
26 |
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
|
27 |
# POSSIBILITY OF SUCH DAMAGE. |
|
28 |
# |
|
29 |
# The views and conclusions contained in the software and |
|
30 |
# documentation are those of the authors and should not be |
|
31 |
# interpreted as representing official policies, either expressed |
|
32 |
# or implied, of GRNET S.A. |
|
33 |
|
|
34 |
from blocker import Blocker |
|
35 |
from mapper import Mapper |
|
36 |
|
b/pithos/backends/lib/hashfiler/blocker.py | ||
---|---|---|
1 |
# Copyright 2011 GRNET S.A. All rights reserved. |
|
2 |
# |
|
3 |
# Redistribution and use in source and binary forms, with or |
|
4 |
# without modification, are permitted provided that the following |
|
5 |
# conditions are met: |
|
6 |
# |
|
7 |
# 1. Redistributions of source code must retain the above |
|
8 |
# copyright notice, this list of conditions and the following |
|
9 |
# disclaimer. |
|
10 |
# |
|
11 |
# 2. Redistributions in binary form must reproduce the above |
|
12 |
# copyright notice, this list of conditions and the following |
|
13 |
# disclaimer in the documentation and/or other materials |
|
14 |
# provided with the distribution. |
|
15 |
# |
|
16 |
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS |
|
17 |
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
|
18 |
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|
19 |
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR |
|
20 |
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
|
21 |
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
|
22 |
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF |
|
23 |
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED |
|
24 |
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
|
25 |
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN |
|
26 |
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
|
27 |
# POSSIBILITY OF SUCH DAMAGE. |
|
28 |
# |
|
29 |
# The views and conclusions contained in the software and |
|
30 |
# documentation are those of the authors and should not be |
|
31 |
# interpreted as representing official policies, either expressed |
|
32 |
# or implied, of GRNET S.A. |
|
33 |
|
|
34 |
from os import makedirs |
|
35 |
from os.path import isdir, realpath, exists, join |
|
36 |
from hashlib import new as newhasher |
|
37 |
from binascii import hexlify |
|
38 |
|
|
39 |
from pithos.lib.hashfiler.context_file import ContextFile, file_sync_read_chunks |
|
40 |
|
|
41 |
|
|
42 |
class Blocker(object):
    """Content-addressable block store backed by plain files.

    Each block lives in a file under 'blockpath' named after the hex
    digest of its hash.
    Required constructor parameters: blocksize, blockpath, hashtype.
    """

    # Filled in by __init__; class-level defaults kept for introspection.
    blocksize = None
    blockpath = None
    hashtype = None

    def __init__(self, **params):
        blocksize = params['blocksize']
        blockpath = params['blockpath']
        blockpath = realpath(blockpath)
        if not isdir(blockpath):
            if not exists(blockpath):
                makedirs(blockpath)
            else:
                # Path exists but is not a directory (e.g. a regular file).
                raise ValueError("Variable blockpath '%s' is not a directory" % (blockpath,))

        hashtype = params['hashtype']
        try:
            hasher = newhasher(hashtype)
        except ValueError:
            msg = "Variable hashtype '%s' is not available from hashlib"
            raise ValueError(msg % (hashtype,))

        # Digest of the empty string; its length gives the hash size.
        hasher.update("")
        emptyhash = hasher.digest()

        self.blocksize = blocksize
        self.blockpath = blockpath
        self.hashtype = hashtype
        self.hashlen = len(emptyhash)
        self.emptyhash = emptyhash

    def get_rear_block(self, blkhash, create=0):
        """Return a ContextFile for the block with the given binary hash."""
        name = join(self.blockpath, hexlify(blkhash))
        return ContextFile(name, create)

    def check_rear_block(self, blkhash):
        """Return true if a backing file exists for the given block hash."""
        name = join(self.blockpath, hexlify(blkhash))
        return exists(name)

    def block_hash(self, data):
        """Hash a block of data."""
        hasher = newhasher(self.hashtype)
        # Trailing NULs are stripped so padded and unpadded blocks hash alike.
        hasher.update(data.rstrip('\x00'))
        return hasher.digest()

    def block_ping(self, hashes):
        """Check hashes for existence and
        return those missing from block storage.
        """
        # Returns *indices* into 'hashes', not the hash values themselves.
        missing = []
        append = missing.append
        for i, h in enumerate(hashes):
            if not self.check_rear_block(h):
                append(i)
        return missing

    def block_retr(self, hashes):
        """Retrieve blocks from storage by their hashes."""
        blocksize = self.blocksize
        blocks = []
        append = blocks.append
        block = None

        for h in hashes:
            with self.get_rear_block(h, 0) as rbl:
                if not rbl:
                    break
                for block in rbl.sync_read_chunks(blocksize, 1, 0):
                    break  # there should be just one block there
            if not block:
                # NOTE(review): a missing or empty block aborts the loop, so
                # the result can be shorter than 'hashes' — callers must check.
                break
            append(block)

        return blocks

    def block_stor(self, blocklist):
        """Store a bunch of blocks and return (hashes, missing).
        Hashes is a list of the hashes of the blocks,
        missing is a list of indices in that list indicating
        which blocks were missing from the store.
        """
        block_hash = self.block_hash
        hashlist = [block_hash(b) for b in blocklist]
        mf = None  # NOTE(review): unused; looks like leftover code.
        missing = self.block_ping(hashlist)
        for i in missing:
            with self.get_rear_block(hashlist[i], 1) as rbl:
                rbl.sync_write(blocklist[i]) #XXX: verify?

        return hashlist, missing

    def block_delta(self, blkhash, offdata=()):
        """Construct and store a new block from a given block
        and a list of (offset, data) 'patches'. Return:
        (the hash of the new block, if the block already existed)
        """
        if not offdata:
            return None, None

        blocksize = self.blocksize
        block = self.block_retr((blkhash,))
        if not block:
            return None, None

        block = block[0]
        newblock = ''
        idx = 0
        size = 0
        trunc = 0
        for off, data in offdata:
            if not data:
                # An empty patch truncates the block at this point.
                trunc = 1
                break
            newblock += block[idx:off] + data
            size += off - idx + len(data)
            if size >= blocksize:
                break
            off = size
            # NOTE(review): 'off' is reassigned here but 'idx' never advances,
            # so multiple patches may duplicate data — confirm intended.

        if not trunc:
            # Append the unmodified tail of the original block.
            newblock += block[size:len(block)]

        h, a = self.block_stor((newblock,))
        return h[0], 1 if a else 0

    def block_hash_file(self, openfile):
        """Return the list of hashes (hashes map)
        for the blocks in a buffered file.
        Helper method, does not affect store.
        """
        hashes = []
        append = hashes.append
        block_hash = self.block_hash

        for block in file_sync_read_chunks(openfile, self.blocksize, 1, 0):
            append(block_hash(block))

        return hashes

    def block_stor_file(self, openfile):
        """Read blocks from buffered file object and store them. Return:
        (bytes read, list of hashes, list of hashes that were missing)
        """
        blocksize = self.blocksize
        block_stor = self.block_stor
        hashlist = []
        hextend = hashlist.extend
        storedlist = []
        sextend = storedlist.extend
        lastsize = 0

        for block in file_sync_read_chunks(openfile, blocksize, 1, 0):
            hl, sl = block_stor((block,))
            hextend(hl)
            sextend(sl)
            lastsize = len(block)

        # All chunks but the last are full blocks; only the last may be short.
        size = (len(hashlist) -1) * blocksize + lastsize if hashlist else 0
        return size, hashlist, storedlist
|
205 |
|
b/pithos/backends/lib/hashfiler/context_file.py | ||
---|---|---|
1 |
# Copyright 2011 GRNET S.A. All rights reserved. |
|
2 |
# |
|
3 |
# Redistribution and use in source and binary forms, with or |
|
4 |
# without modification, are permitted provided that the following |
|
5 |
# conditions are met: |
|
6 |
# |
|
7 |
# 1. Redistributions of source code must retain the above |
|
8 |
# copyright notice, this list of conditions and the following |
|
9 |
# disclaimer. |
|
10 |
# |
|
11 |
# 2. Redistributions in binary form must reproduce the above |
|
12 |
# copyright notice, this list of conditions and the following |
|
13 |
# disclaimer in the documentation and/or other materials |
|
14 |
# provided with the distribution. |
|
15 |
# |
|
16 |
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS |
|
17 |
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
|
18 |
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|
19 |
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR |
|
20 |
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
|
21 |
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
|
22 |
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF |
|
23 |
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED |
|
24 |
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
|
25 |
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN |
|
26 |
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
|
27 |
# POSSIBILITY OF SUCH DAMAGE. |
|
28 |
# |
|
29 |
# The views and conclusions contained in the software and |
|
30 |
# documentation are those of the authors and should not be |
|
31 |
# interpreted as representing official policies, either expressed |
|
32 |
# or implied, of GRNET S.A. |
|
33 |
|
|
34 |
from os import SEEK_CUR, SEEK_SET, fsync |
|
35 |
from errno import ENOENT |
|
36 |
|
|
37 |
|
|
38 |
# Module-level cache: the zero-filled string from the latest request.
_zeros = ''


def zeros(nr):
    """Return a string of nr NUL ('\\0') bytes, reusing a module cache.

    The cache is grown or shrunk in place to exactly nr bytes, so memory
    stays proportional to the most recent request.
    """
    global _zeros
    cached = len(_zeros)
    if nr > cached:
        # Grow the cache up to the requested length.
        _zeros += '\0' * (nr - cached)
    elif nr < cached:
        # Shrink the cache down to the requested length.
        _zeros = _zeros[:nr]
    return _zeros
|
54 |
|
|
55 |
|
|
56 |
def file_sync_write_chunks(openfile, chunksize, offset, chunks, size=None):
    """Write given chunks to the given buffered file object.
    Writes never span across chunk boundaries.
    If size is given stop after or pad until size bytes have been written.
    """
    fwrite = openfile.write
    seek = openfile.seek
    padding = 0

    try:
        seek(offset * chunksize)
    except IOError, e:
        # Unseekable stream: emulate the seek by writing zero-filled chunks.
        seek = None
        for x in xrange(offset):
            fwrite(zeros(chunksize))

    cursize = offset * chunksize

    for chunk in chunks:
        if padding:
            # The previous chunk was short; pad it out to the boundary.
            if seek:
                # Seekable: skip ahead and write one byte to extend the file.
                seek(padding -1, SEEK_CUR)
                fwrite("\x00")
            else:
                fwrite(buffer(zeros(chunksize), 0, padding))
        if size is not None and cursize + chunksize >= size:
            # Final chunk before reaching 'size': truncate and stop.
            chunk = chunk[:chunksize - (cursize - size)]
            fwrite(chunk)
            cursize += len(chunk)
            break
        fwrite(chunk)
        padding = chunksize - len(chunk)
        # NOTE(review): cursize is not advanced on this path, so the tail
        # padding below is computed as if no full chunks were written —
        # confirm against callers.

    # Pad with zeros up to 'size' bytes, if requested.
    padding = size - cursize if size is not None else 0
    if padding <= 0:
        return

    q, r = divmod(padding, chunksize)
    for x in xrange(q):
        fwrite(zeros(chunksize))
    fwrite(buffer(zeros(chunksize), 0, r))
|
97 |
|
|
98 |
|
|
99 |
def file_sync_read_chunks(openfile, chunksize, nr, offset=0):
    """Read and yield groups of chunks from a buffered file object at offset.
    Reads never span across chunksize boundaries.
    Yields at most nr chunks; the final chunk may be shorter at EOF.
    """
    fread = openfile.read
    remains = offset * chunksize
    seek = openfile.seek
    try:
        seek(remains)
    except IOError, e:
        # Unseekable stream: consume and discard bytes up to the offset.
        # NOTE(review): if the stream hits EOF before the offset, fread
        # returns '' forever and this loop never terminates — confirm.
        seek = None
        while 1:
            s = fread(remains)
            remains -= len(s)
            if remains <= 0:
                break

    while nr:
        # Accumulate exactly one chunk, possibly over several short reads.
        remains = chunksize
        chunk = ''
        while 1:
            s = fread(remains)
            if not s:
                # EOF: yield any partial final chunk, then stop.
                if chunk:
                    yield chunk
                return
            chunk += s
            remains -= len(s)
            if remains <= 0:
                break
        yield chunk
        nr -= 1
|
131 |
|
|
132 |
|
|
133 |
class ContextFile(object):
    """File wrapper usable as a context manager.

    Opens the underlying file on __enter__ (optionally creating it when
    missing) and closes it on __exit__; exceptions always propagate.
    """

    # name:   path of the underlying file
    # fdesc:  open file object, set between __enter__ and __exit__
    # create: if true, create the file when it does not exist
    __slots__ = ("name", "fdesc", "create")

    def __init__(self, name, create=0):
        self.name = name
        self.fdesc = None
        self.create = create
        #self.dirty = 0

    def __enter__(self):
        name = self.name
        try:
            fdesc = open(name, 'rb+')
        except IOError, e:
            # Only create the file when asked to and it genuinely
            # does not exist; re-raise any other I/O failure.
            if not self.create or e.errno != ENOENT:
                raise
            fdesc = open(name, 'w+')

        self.fdesc = fdesc
        return self

    def __exit__(self, exc, arg, trace):
        fdesc = self.fdesc
        if fdesc is not None:
            #if self.dirty:
            #    fsync(fdesc.fileno())
            fdesc.close()
        return False # propagate exceptions

    def seek(self, offset, whence=SEEK_SET):
        """Delegate to the underlying file's seek."""
        return self.fdesc.seek(offset, whence)

    def tell(self):
        """Delegate to the underlying file's tell."""
        return self.fdesc.tell()

    def truncate(self, size):
        """Truncate the underlying file to the given size."""
        self.fdesc.truncate(size)

    def sync_write(self, data):
        """Write data at the current position."""
        #self.dirty = 1
        self.fdesc.write(data)

    def sync_write_chunks(self, chunksize, offset, chunks, size=None):
        """Write chunks at the given chunk offset; see file_sync_write_chunks."""
        #self.dirty = 1
        return file_sync_write_chunks(self.fdesc, chunksize, offset, chunks, size)

    def sync_read(self, size):
        """Read up to 'size' bytes per call until EOF and return them all."""
        read = self.fdesc.read
        data = ''
        while 1:
            s = read(size)
            if not s:
                break
            data += s
        return data

    def sync_read_chunks(self, chunksize, nr, offset=0):
        """Yield up to nr chunks from the given offset; see file_sync_read_chunks."""
        return file_sync_read_chunks(self.fdesc, chunksize, nr, offset)
|
191 |
|
b/pithos/backends/lib/hashfiler/mapper.py | ||
---|---|---|
1 |
# Copyright 2011 GRNET S.A. All rights reserved. |
|
2 |
# |
|
3 |
# Redistribution and use in source and binary forms, with or |
|
4 |
# without modification, are permitted provided that the following |
|
5 |
# conditions are met: |
|
6 |
# |
|
7 |
# 1. Redistributions of source code must retain the above |
|
8 |
# copyright notice, this list of conditions and the following |
|
9 |
# disclaimer. |
|
10 |
# |
|
11 |
# 2. Redistributions in binary form must reproduce the above |
|
12 |
# copyright notice, this list of conditions and the following |
|
13 |
# disclaimer in the documentation and/or other materials |
|
14 |
# provided with the distribution. |
|
15 |
# |
|
16 |
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS |
|
17 |
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
|
18 |
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|
19 |
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR |
|
20 |
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
|
21 |
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
|
22 |
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF |
|
23 |
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED |
|
24 |
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
|
25 |
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN |
|
26 |
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
|
27 |
# POSSIBILITY OF SUCH DAMAGE. |
|
28 |
# |
|
29 |
# The views and conclusions contained in the software and |
|
30 |
# documentation are those of the authors and should not be |
|
31 |
# interpreted as representing official policies, either expressed |
|
32 |
# or implied, of GRNET S.A. |
|
33 |
|
|
34 |
from os.path import realpath, join, exists, isdir |
|
35 |
from os import makedirs, unlink |
|
36 |
from errno import ENOENT |
|
37 |
|
|
38 |
from pithos.lib.hashfiler.context_file import ContextFile |
|
39 |
|
|
40 |
|
|
41 |
class Mapper(object):
    """Mapper.

    Stores the hashes map of each object as fixed-size records in one
    file per object under 'mappath'.
    Required constructor parameters: mappath, namelen.
    """

    # Filled in by __init__; class-level defaults kept for introspection.
    mappath = None
    namelen = None

    def __init__(self, **params):
        self.params = params
        self.namelen = params['namelen']
        mappath = realpath(params['mappath'])
        if not isdir(mappath):
            if not exists(mappath):
                makedirs(mappath)
            else:
                # Path exists but is not a directory (e.g. a regular file).
                raise ValueError("Variable mappath '%s' is not a directory" % (mappath,))
        self.mappath = mappath

    def get_rear_map(self, name, create=0):
        """Return a ContextFile for the map of the given (numeric) name."""
        name = join(self.mappath, hex(int(name)))
        return ContextFile(name, create)

    def delete_rear_map(self, name):
        """Delete the map file; return 1 if it existed, 0 otherwise."""
        name = join(self.mappath, hex(int(name)))
        try:
            unlink(name)
            return 1
        except OSError, e:
            if e.errno != ENOENT:
                raise
            return 0

    def map_retr(self, name, blkoff=0, nr=100000000000000):
        """Return as a list, part of the hashes map of an object
        at the given block offset.
        By default, return the whole hashes map.
        """
        namelen = self.namelen
        hashes = ()

        with self.get_rear_map(name, 0) as rmap:
            if rmap:
                hashes = list(rmap.sync_read_chunks(namelen, nr, blkoff))
        return hashes

    def map_stor(self, name, hashes=(), blkoff=0, create=1):
        """Store hashes in the given hashes map, replacing the old ones."""
        # NOTE(review): 'create' is accepted but not used; the rear map is
        # always opened with create=1 — confirm whether that is intended.
        namelen = self.namelen
        with self.get_rear_map(name, 1) as rmap:
            rmap.sync_write_chunks(namelen, blkoff, hashes, None)

#   def map_copy(self, src, dst):
#       """Copy a hashes map to another one, replacing it."""
#       with self.get_rear_map(src, 0) as rmap:
#           if rmap:
#               rmap.copy_to(dst)

    def map_remv(self, name):
        """Remove a hashes map. Returns true if the map was found and removed."""
        return self.delete_rear_map(name)
|
102 |
|
b/pithos/backends/lib/permissions.py | ||
---|---|---|
1 |
# Copyright 2011 GRNET S.A. All rights reserved. |
|
2 |
# |
|
3 |
# Redistribution and use in source and binary forms, with or |
|
4 |
# without modification, are permitted provided that the following |
|
5 |
# conditions are met: |
|
6 |
# |
|
7 |
# 1. Redistributions of source code must retain the above |
|
8 |
# copyright notice, this list of conditions and the following |
|
9 |
# disclaimer. |
|
10 |
# |
|
11 |
# 2. Redistributions in binary form must reproduce the above |
|
12 |
# copyright notice, this list of conditions and the following |
|
13 |
# disclaimer in the documentation and/or other materials |
|
14 |
# provided with the distribution. |
|
15 |
# |
|
16 |
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS |
|
17 |
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
|
18 |
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|
19 |
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR |
|
20 |
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
|
21 |
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
|
22 |
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF |
|
23 |
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED |
|
24 |
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
|
25 |
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN |
|
26 |
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
|
27 |
# POSSIBILITY OF SUCH DAMAGE. |
|
28 |
# |
|
29 |
# The views and conclusions contained in the software and |
|
30 |
# documentation are those of the authors and should not be |
|
31 |
# interpreted as representing official policies, either expressed |
|
32 |
# or implied, of GRNET S.A. |
|
33 |
|
|
34 |
from xfeatures import XFeatures |
|
35 |
from groups import Groups |
|
36 |
from public import Public |
|
37 |
|
|
38 |
|
|
39 |
class Permissions(XFeatures, Groups, Public):
    """Access control layer combining path features, owner groups and
    public flags (mixes in XFeatures, Groups and Public).
    """

    def __init__(self, **params):
        XFeatures.__init__(self, **params)
        Groups.__init__(self, **params)
        Public.__init__(self, **params)

    def access_grant(self, access, path, member='all', members=()):
        """Grant a member with an access to a path.

        Returns () on success. If the path conflicts with existing
        feature paths, returns the conflicting xfeature list instead.
        """
        xfeatures = self.xfeature_list(path)
        xfl = len(xfeatures)
        if xfl > 1 or (xfl == 1 and xfeatures[0][0] != path):
            return xfeatures
        if xfl == 0:
            # No feature attached to this path yet: allocate and attach one.
            feature = self.alloc_serial()
            self.xfeature_bestow(path, feature)
        else:
            fpath, feature = xfeatures[0]

        if members:
            self.feature_setmany(feature, access, members)
        else:
            self.feature_set(feature, access, member)

        return ()

    def access_revoke(self, access, path, member='all', members=()):
        """Revoke access to path from members.
        Note that this will not revoke access for members
        that are indirectly granted access through group membership.
        """
        # XXX: Maybe provide a force_revoke that will kick out
        #      all groups containing the given members?
        xfeatures = self.xfeature_list(path)
        xfl = len(xfeatures)
        if xfl != 1 or xfeatures[0][0] != path:
            return xfeatures

        fpath, feature = xfeatures[0]

        if members:
            self.feature_unsetmany(feature, access, members=members)
        else:
            self.feature_unset(feature, access, member)

        # XXX: provide a meaningful return value?

        return ()

    def access_check(self, access, path, member):
        """Return true if the member has this access to the path."""
        r = self.xfeature_inherit(path)
        if not r:
            return 0

        fpath, feature = r
        memberset = set(self.feature_get(feature, access))
        if member in memberset:
            return 1

        # Bug fix: group_parents is a bound method, so the original call
        # self.group_parents(self, member) passed 'self' twice and would
        # raise a TypeError at runtime.
        for group in self.group_parents(member):
            # NOTE(review): group_parents yields (owner, name) rows — confirm
            # that memberset stores group grants in the same representation.
            if group in memberset:
                return 1

        return 0

    def access_list(self, path):
        """Return the list of (access, member) pairs for the path."""
        r = self.xfeature_inherit(path)
        if not r:
            return ()

        fpath, feature = r
        return self.feature_list(feature)

    def access_list_paths(self, member):
        """Return the list of (access, path) pairs granted to member."""
        # NOTE(review): queries a 'members' table that is not defined in
        # groups.py — confirm the schema this runs against.
        q = ("select distinct key, path from xfeatures inner join "
             " (select distinct feature, key from xfeaturevals inner join "
             "  (select name as value from members "
             "   where member = ? union select ?) "
             "  using (value)) "
             "using (feature)")

        self.execute(q, (member, member))
        return self.fetchall()
b/pithos/backends/lib/public.py | ||
---|---|---|
1 |
# Copyright 2011 GRNET S.A. All rights reserved. |
|
2 |
# |
|
3 |
# Redistribution and use in source and binary forms, with or |
|
4 |
# without modification, are permitted provided that the following |
|
5 |
# conditions are met: |
|
6 |
# |
|
7 |
# 1. Redistributions of source code must retain the above |
|
8 |
# copyright notice, this list of conditions and the following |
|
9 |
# disclaimer. |
|
10 |
# |
|
11 |
# 2. Redistributions in binary form must reproduce the above |
|
12 |
# copyright notice, this list of conditions and the following |
|
13 |
# disclaimer in the documentation and/or other materials |
|
14 |
# provided with the distribution. |
|
15 |
# |
|
16 |
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS |
|
17 |
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
|
18 |
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|
19 |
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR |
|
20 |
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
|
21 |
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
|
22 |
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF |
|
23 |
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED |
|
24 |
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
|
25 |
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN |
|
26 |
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
|
27 |
# POSSIBILITY OF SUCH DAMAGE. |
|
28 |
# |
|
29 |
# The views and conclusions contained in the software and |
|
30 |
# documentation are those of the authors and should not be |
|
31 |
# interpreted as representing official policies, either expressed |
|
32 |
# or implied, of GRNET S.A. |
|
33 |
|
|
34 |
from dbworker import DBWorker |
|
35 |
|
|
36 |
|
|
37 |
class Public(DBWorker):
    """Paths can be marked as public."""

    def __init__(self, **params):
        DBWorker.__init__(self, **params)
        # One-column table: mere presence of a row marks the path public.
        self.execute(""" create table if not exists public
                          ( path text primary key ) """)

    def public_set(self, path):
        """Mark a path as public (no-op when already public)."""
        sql = "insert or ignore into public (path) values (?)"
        self.execute(sql, (path,))

    def public_unset(self, path):
        """Remove the public mark from a path (no-op when absent)."""
        sql = "delete from public where path = ?"
        self.execute(sql, (path,))

    def public_check(self, path):
        """Return whether the path is marked as public."""
        sql = "select 1 from public where path = ?"
        self.execute(sql, (path,))
        return bool(self.fetchone())
b/pithos/backends/lib/xfeatures.py | ||
---|---|---|
1 |
# Copyright 2011 GRNET S.A. All rights reserved. |
|
2 |
# |
|
3 |
# Redistribution and use in source and binary forms, with or |
|
4 |
# without modification, are permitted provided that the following |
|
5 |
# conditions are met: |
|
6 |
# |
|
7 |
# 1. Redistributions of source code must retain the above |
|
8 |
# copyright notice, this list of conditions and the following |
|
9 |
# disclaimer. |
|
10 |
# |
|
11 |
# 2. Redistributions in binary form must reproduce the above |
|
12 |
# copyright notice, this list of conditions and the following |
|
13 |
# disclaimer in the documentation and/or other materials |
|
14 |
# provided with the distribution. |
|
15 |
# |
|
16 |
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS |
|
17 |
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
|
18 |
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|
19 |
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR |
|
20 |
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
|
21 |
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
|
22 |
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF |
|
23 |
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED |
|
24 |
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
|
25 |
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN |
|
26 |
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
|
27 |
# POSSIBILITY OF SUCH DAMAGE. |
|
28 |
# |
|
29 |
# The views and conclusions contained in the software and |
|
30 |
# documentation are those of the authors and should not be |
|
31 |
# interpreted as representing official policies, either expressed |
|
32 |
# or implied, of GRNET S.A. |
|
33 |
|
|
34 |
from dbworker import DBWorker |
|
35 |
|
|
36 |
|
|
37 |
class XFeatures(DBWorker):
    """XFeatures are path properties that allow non-nested
    inheritance patterns. Currently used for storing permissions.
    """

    def __init__(self, **params):
        DBWorker.__init__(self, **params)
        execute = self.execute

        execute(""" pragma foreign_keys = on """)

        execute(""" create table if not exists xfeatures
                          ( feature_id integer primary key,
                            path       text ) """)
        execute(""" create unique index if not exists idx_features_path
                          on xfeatures(path) """)

        # Fix: added the comma between the primary-key and foreign-key
        # table constraints; SQLite tolerates the omission but it is
        # non-standard SQL.
        execute(""" create table if not exists xfeaturevals
                          ( feature_id integer,
                            key        integer,
                            value      text,
                            primary key (feature_id, key, value),
                            foreign key (feature_id) references xfeatures(feature_id)
                            on delete cascade ) """)

    def xfeature_inherit(self, path):
        """Return the (path, feature) inherited by the path, or None."""

        q = ("select path, feature_id from xfeatures "
             "where path <= ? "
             "order by path desc limit 1")
        self.execute(q, (path,))
        r = self.fetchone()
        # The nearest smaller-or-equal path is an ancestor only if it is
        # actually a prefix of the requested path.
        if r is not None and path.startswith(r[0]):
            return r
        return None

    def xfeature_list(self, path):
        """Return the list of the (prefix, feature) pairs matching path.
        A prefix matches path if either the prefix includes the path,
        or the path includes the prefix.
        """

        inherited = self.xfeature_inherit(path)
        if inherited:
            return [inherited]

        q = ("select path, feature_id from xfeatures "
             "where path like ? and path != ? order by path")
        self.execute(q, (path + '%', path,))
        return self.fetchall()

    def xfeature_create(self, path):
        """Create and return a feature for path.
        If the path already inherits a feature or
        bestows to paths already inheriting a feature,
        create no feature and return None.
        """

        prefixes = self.xfeature_list(path)
        pl = len(prefixes)
        if (pl > 1) or (pl == 1 and prefixes[0][0] != path):
            return None
        q = "insert into xfeatures (path) values (?)"
        id = self.execute(q, (path,)).lastrowid
        return id

    def xfeature_destroy(self, path):
        """Destroy a feature and all its key, value pairs."""

        # Values in xfeaturevals are removed by the on-delete cascade.
        q = "delete from xfeatures where path = ?"
        self.execute(q, (path,))

    def feature_list(self, feature):
        """Return the list of all key, value pairs
        associated with a feature.
        """

        # Fix: the column is named 'feature_id' (there is no 'feature'
        # column), matching every other query against xfeaturevals.
        q = "select key, value from xfeaturevals where feature_id = ?"
        self.execute(q, (feature,))
        return self.fetchall()

    def feature_set(self, feature, key, value):
        """Associate a key, value pair with a feature."""

        q = "insert or ignore into xfeaturevals (feature_id, key, value) values (?, ?, ?)"
        self.execute(q, (feature, key, value))

    def feature_setmany(self, feature, key, values):
        """Associate the given key, and values with a feature."""

        q = "insert or ignore into xfeaturevals (feature_id, key, value) values (?, ?, ?)"
        self.executemany(q, ((feature, key, v) for v in values))

    def feature_unset(self, feature, key, value):
        """Disassociate a key, value pair from a feature."""

        q = ("delete from xfeaturevals where "
             "feature_id = ? and key = ? and value = ?")
        self.execute(q, (feature, key, value))

    def feature_unsetmany(self, feature, key, values):
        """Disassociate the key for the values given, from a feature."""

        q = ("delete from xfeaturevals where "
             "feature_id = ? and key = ? and value = ?")
        self.executemany(q, ((feature, key, v) for v in values))

    def feature_get(self, feature, key):
        """Return the list of values for a key of a feature."""

        q = "select value from xfeaturevals where feature_id = ? and key = ?"
        self.execute(q, (feature, key))
        return [r[0] for r in self.fetchall()]

    def feature_clear(self, feature, key):
        """Delete all key, value pairs for a key of a feature."""

        q = "delete from xfeaturevals where feature_id = ? and key = ?"
        self.execute(q, (feature, key))
b/pithos/backends/modular.py | ||
---|---|---|
1 |
# Copyright 2011 GRNET S.A. All rights reserved. |
|
2 |
# |
|
3 |
# Redistribution and use in source and binary forms, with or |
|
4 |
# without modification, are permitted provided that the following |
|
5 |
# conditions are met: |
|
6 |
# |
|
7 |
# 1. Redistributions of source code must retain the above |
|
8 |
# copyright notice, this list of conditions and the following |
|
9 |
# disclaimer. |
|
10 |
# |
|
11 |
# 2. Redistributions in binary form must reproduce the above |
|
12 |
# copyright notice, this list of conditions and the following |
|
13 |
# disclaimer in the documentation and/or other materials |
|
14 |
# provided with the distribution. |
|
15 |
# |
|
16 |
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS |
|
17 |
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
|
18 |
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|
19 |
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR |
|
20 |
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
|
21 |
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
|
22 |
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF |
|
23 |
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED |
|
24 |
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
|
25 |
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN |
|
26 |
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
|
27 |
# POSSIBILITY OF SUCH DAMAGE. |
|
28 |
# |
|
29 |
# The views and conclusions contained in the software and |
|
30 |
# documentation are those of the authors and should not be |
|
31 |
# interpreted as representing official policies, either expressed |
|
32 |
# or implied, of GRNET S.A. |
|
33 |
|
|
34 |
import os |
|
35 |
import time |
|
36 |
import sqlite3 |
|
37 |
import logging |
|
38 |
import hashlib |
|
39 |
import binascii |
|
40 |
|
|
41 |
from base import NotAllowedError, BaseBackend |
|
42 |
from lib.permissions import Permissions |
|
43 |
from lib.hashfiler import Mapper, Blocker |
|
44 |
|
|
45 |
|
|
46 |
logger = logging.getLogger(__name__) |
|
47 |
|
|
48 |
def backend_method(func=None, autocommit=1):
    """Decorator wrapping a backend call in a deferred SQLite transaction.

    Commits on success and rolls back on any failure before re-raising.
    With autocommit=0 the function is returned unwrapped.  Usable both
    bare (@backend_method) and with arguments
    (@backend_method(autocommit=...)).
    """
    import functools

    if func is None:
        # Called with arguments: return the actual decorator.
        def fn(func):
            return backend_method(func, autocommit)
        return fn

    if not autocommit:
        return func

    # Fix: preserve the wrapped function's name/docstring for logging
    # and introspection.
    @functools.wraps(func)
    def fn(self, *args, **kw):
        self.con.execute('begin deferred')
        try:
            ret = func(self, *args, **kw)
            self.con.commit()
            return ret
        except:
            # Bare except on purpose: the rollback must run on *any*
            # failure (including KeyboardInterrupt) before re-raising.
            self.con.rollback()
            raise
    return fn
|
66 |
|
|
67 |
|
|
68 |
class ModularBackend(BaseBackend): |
|
69 |
"""A modular backend. |
|
70 |
|
|
71 |
Uses SQLite for storage. |
|
72 |
""" |
|
73 |
|
|
74 |
# TODO: Create account if not present in all functions. |
|
75 |
|
|
76 |
def __init__(self, db): |
|
77 |
self.hash_algorithm = 'sha256' |
|
78 |
self.block_size = 4 * 1024 * 1024 # 4MB |
|
79 |
|
|
80 |
self.default_policy = {'quota': 0, 'versioning': 'auto'} |
|
81 |
|
|
82 |
basepath = os.path.split(db)[0] |
|
83 |
if basepath and not os.path.exists(basepath): |
|
84 |
os.makedirs(basepath) |
|
85 |
if not os.path.isdir(basepath): |
|
86 |
raise RuntimeError("Cannot open database at '%s'" % (db,)) |
|
87 |
|
|
88 |
self.con = sqlite3.connect(basepath + '/db', check_same_thread=False) |
|
89 |
|
|
90 |
sql = '''pragma foreign_keys = on''' |
|
91 |
self.con.execute(sql) |
|
92 |
|
|
93 |
sql = '''create table if not exists versions ( |
|
94 |
version_id integer primary key, |
|
95 |
name text, |
|
96 |
user text, |
|
97 |
tstamp integer not null, |
|
98 |
size integer default 0, |
|
99 |
hide integer default 0)''' |
|
100 |
self.con.execute(sql) |
|
101 |
sql = '''create table if not exists metadata ( |
|
102 |
version_id integer, |
|
103 |
key text, |
|
104 |
value text, |
|
105 |
primary key (version_id, key) |
|
106 |
foreign key (version_id) references versions(version_id) |
|
107 |
on delete cascade)''' |
|
108 |
self.con.execute(sql) |
|
109 |
sql = '''create table if not exists policy ( |
|
110 |
name text, key text, value text, primary key (name, key))''' |
|
111 |
self.con.execute(sql) |
|
112 |
|
|
113 |
self.con.commit() |
|
114 |
|
|
115 |
params = {'blocksize': self.block_size, |
|
116 |
'blockpath': basepath + '/blocks', |
|
117 |
'hashtype': self.hash_algorithm} |
|
118 |
self.blocker = Blocker(**params) |
|
119 |
|
|
120 |
params = {'mappath': basepath + '/maps', |
|
121 |
'namelen': self.blocker.hashlen} |
|
122 |
self.mapper = Mapper(**params) |
|
123 |
|
|
124 |
params = {'connection': self.con, |
|
125 |
'cursor': self.con.cursor()} |
|
126 |
self.permissions = Permissions(**params) |
|
127 |
|
|
128 |
@backend_method |
|
129 |
def list_accounts(self, user, marker=None, limit=10000): |
|
130 |
"""Return a list of accounts the user can access.""" |
|
131 |
|
|
132 |
allowed = self._allowed_accounts(user) |
|
133 |
start, limit = self._list_limits(allowed, marker, limit) |
|
134 |
return allowed[start:start + limit] |
|
135 |
|
|
136 |
@backend_method |
|
137 |
def get_account_meta(self, user, account, until=None): |
|
138 |
"""Return a dictionary with the account metadata.""" |
|
139 |
|
|
140 |
logger.debug("get_account_meta: %s %s", account, until) |
|
141 |
if user != account: |
|
142 |
if until or account not in self._allowed_accounts(user): |
|
143 |
raise NotAllowedError |
|
144 |
else: |
|
145 |
self._create_account(user, account) |
|
146 |
try: |
|
147 |
version_id, mtime = self._get_accountinfo(account, until) |
|
148 |
except NameError: |
|
149 |
# Account does not exist before until. |
|
150 |
version_id = None |
|
151 |
mtime = until |
|
152 |
count, bytes, tstamp = self._get_pathstats(account, until) |
|
153 |
if mtime > tstamp: |
|
154 |
tstamp = mtime |
|
155 |
if until is None: |
|
156 |
modified = tstamp |
|
157 |
else: |
|
158 |
modified = self._get_pathstats(account)[2] # Overall last modification |
|
159 |
if mtime > modified: |
|
160 |
modified = mtime |
|
161 |
|
|
162 |
# Proper count. |
|
163 |
sql = 'select count(name) from (%s) where name glob ? and not name glob ?' |
|
164 |
sql = sql % self._sql_until(until) |
|
165 |
c = self.con.execute(sql, (account + '/*', account + '/*/*')) |
|
166 |
row = c.fetchone() |
|
167 |
count = row[0] |
|
168 |
|
|
169 |
if user != account: |
|
170 |
meta = {'name': account} |
|
171 |
else: |
|
172 |
meta = self._get_metadata(account, version_id) |
|
173 |
meta.update({'name': account, 'count': count, 'bytes': bytes}) |
|
174 |
if until is not None: |
|
175 |
meta.update({'until_timestamp': tstamp}) |
|
176 |
meta.update({'modified': modified}) |
|
177 |
return meta |
|
178 |
|
|
179 |
@backend_method |
|
180 |
def update_account_meta(self, user, account, meta, replace=False): |
|
181 |
"""Update the metadata associated with the account.""" |
|
182 |
|
|
183 |
logger.debug("update_account_meta: %s %s %s", account, meta, replace) |
|
184 |
if user != account: |
|
185 |
raise NotAllowedError |
|
186 |
self._put_metadata(user, account, meta, replace, False) |
|
187 |
|
|
188 |
@backend_method |
|
189 |
def get_account_groups(self, user, account): |
|
190 |
"""Return a dictionary with the user groups defined for this account.""" |
|
191 |
|
|
192 |
logger.debug("get_account_groups: %s", account) |
|
193 |
if user != account: |
|
194 |
if account not in self._allowed_accounts(user): |
|
195 |
raise NotAllowedError |
|
196 |
return {} |
|
197 |
self._create_account(user, account) |
|
198 |
return self._get_groups(account) |
|
199 |
|
|
200 |
@backend_method |
|
201 |
def update_account_groups(self, user, account, groups, replace=False): |
|
202 |
"""Update the groups associated with the account.""" |
|
203 |
|
|
204 |
logger.debug("update_account_groups: %s %s %s", account, groups, replace) |
|
205 |
if user != account: |
|
206 |
raise NotAllowedError |
|
207 |
self._create_account(user, account) |
|
208 |
self._check_groups(groups) |
|
209 |
self._put_groups(account, groups, replace) |
|
210 |
|
|
211 |
@backend_method |
|
212 |
def put_account(self, user, account): |
|
213 |
"""Create a new account with the given name.""" |
|
214 |
|
|
215 |
logger.debug("put_account: %s", account) |
|
216 |
if user != account: |
|
217 |
raise NotAllowedError |
|
218 |
try: |
|
219 |
version_id, mtime = self._get_accountinfo(account) |
|
220 |
except NameError: |
|
221 |
pass |
|
222 |
else: |
|
223 |
raise NameError('Account already exists') |
|
224 |
self._put_version(account, user) |
|
225 |
|
|
226 |
@backend_method |
|
227 |
def delete_account(self, user, account): |
|
228 |
"""Delete the account with the given name.""" |
|
229 |
|
|
230 |
logger.debug("delete_account: %s", account) |
|
231 |
if user != account: |
|
232 |
raise NotAllowedError |
|
233 |
count = self._get_pathstats(account)[0] |
|
234 |
if count > 0: |
|
235 |
raise IndexError('Account is not empty') |
|
236 |
sql = 'delete from versions where name = ?' |
|
237 |
self.con.execute(sql, (account,)) |
|
238 |
self._del_groups(account) |
|
239 |
|
|
240 |
@backend_method |
|
241 |
def list_containers(self, user, account, marker=None, limit=10000, shared=False, until=None): |
|
242 |
"""Return a list of containers existing under an account.""" |
|
243 |
|
|
244 |
logger.debug("list_containers: %s %s %s %s", account, marker, limit, until) |
|
245 |
if user != account: |
|
246 |
if until or account not in self._allowed_accounts(user): |
|
247 |
raise NotAllowedError |
|
248 |
allowed = self._allowed_containers(user, account) |
|
249 |
start, limit = self._list_limits(allowed, marker, limit) |
|
250 |
return allowed[start:start + limit] |
|
251 |
else: |
|
252 |
if shared: |
|
253 |
allowed = [x.split('/', 2)[1] for x in self._shared_paths(account)] |
|
254 |
start, limit = self._list_limits(allowed, marker, limit) |
|
255 |
return allowed[start:start + limit] |
|
256 |
return [x[0] for x in self._list_objects(account, '', '/', marker, limit, False, [], until)] |
|
257 |
|
|
258 |
@backend_method |
|
259 |
def get_container_meta(self, user, account, container, until=None): |
|
260 |
"""Return a dictionary with the container metadata.""" |
|
261 |
|
|
262 |
logger.debug("get_container_meta: %s %s %s", account, container, until) |
|
263 |
if user != account: |
|
264 |
if until or container not in self._allowed_containers(user, account): |
|
265 |
raise NotAllowedError |
|
266 |
path, version_id, mtime = self._get_containerinfo(account, container, until) |
|
267 |
count, bytes, tstamp = self._get_pathstats(path, until) |
|
268 |
if mtime > tstamp: |
|
269 |
tstamp = mtime |
|
270 |
if until is None: |
|
271 |
modified = tstamp |
|
272 |
else: |
|
273 |
modified = self._get_pathstats(path)[2] # Overall last modification |
|
274 |
if mtime > modified: |
|
275 |
modified = mtime |
|
276 |
|
|
277 |
if user != account: |
|
278 |
meta = {'name': container, 'modified': modified} |
|
279 |
else: |
|
280 |
meta = self._get_metadata(path, version_id) |
|
281 |
meta.update({'name': container, 'count': count, 'bytes': bytes, 'modified': modified}) |
|
282 |
if until is not None: |
|
283 |
meta.update({'until_timestamp': tstamp}) |
|
284 |
return meta |
|
285 |
|
|
286 |
@backend_method |
|
287 |
def update_container_meta(self, user, account, container, meta, replace=False): |
|
288 |
"""Update the metadata associated with the container.""" |
|
289 |
|
|
290 |
logger.debug("update_container_meta: %s %s %s %s", account, container, meta, replace) |
|
291 |
if user != account: |
|
292 |
raise NotAllowedError |
|
293 |
path, version_id, mtime = self._get_containerinfo(account, container) |
|
294 |
self._put_metadata(user, path, meta, replace, False) |
|
295 |
|
|
296 |
@backend_method |
|
297 |
def get_container_policy(self, user, account, container): |
|
298 |
"""Return a dictionary with the container policy.""" |
|
299 |
|
|
300 |
logger.debug("get_container_policy: %s %s", account, container) |
|
301 |
if user != account: |
|
302 |
if container not in self._allowed_containers(user, account): |
|
303 |
raise NotAllowedError |
|
304 |
return {} |
|
305 |
path = self._get_containerinfo(account, container)[0] |
|
306 |
return self._get_policy(path) |
|
307 |
|
|
308 |
@backend_method |
|
309 |
def update_container_policy(self, user, account, container, policy, replace=False): |
|
310 |
"""Update the policy associated with the account.""" |
|
311 |
|
|
312 |
logger.debug("update_container_policy: %s %s %s %s", account, container, policy, replace) |
|
313 |
if user != account: |
|
314 |
raise NotAllowedError |
|
315 |
path = self._get_containerinfo(account, container)[0] |
|
316 |
self._check_policy(policy) |
|
317 |
if replace: |
|
318 |
for k, v in self.default_policy.iteritems(): |
|
319 |
if k not in policy: |
|
320 |
policy[k] = v |
|
321 |
for k, v in policy.iteritems(): |
|
322 |
sql = 'insert or replace into policy (name, key, value) values (?, ?, ?)' |
|
323 |
self.con.execute(sql, (path, k, v)) |
|
324 |
|
|
325 |
@backend_method |
|
326 |
def put_container(self, user, account, container, policy=None): |
|
327 |
"""Create a new container with the given name.""" |
|
328 |
|
|
329 |
logger.debug("put_container: %s %s %s", account, container, policy) |
|
330 |
if user != account: |
|
331 |
raise NotAllowedError |
|
332 |
try: |
|
333 |
path, version_id, mtime = self._get_containerinfo(account, container) |
|
334 |
except NameError: |
|
335 |
pass |
|
336 |
else: |
|
337 |
raise NameError('Container already exists') |
|
338 |
if policy: |
|
339 |
self._check_policy(policy) |
|
340 |
path = '/'.join((account, container)) |
|
341 |
version_id = self._put_version(path, user)[0] |
|
342 |
for k, v in self.default_policy.iteritems(): |
|
343 |
if k not in policy: |
|
344 |
policy[k] = v |
|
345 |
for k, v in policy.iteritems(): |
|
346 |
sql = 'insert or replace into policy (name, key, value) values (?, ?, ?)' |
|
347 |
self.con.execute(sql, (path, k, v)) |
|
348 |
|
|
349 |
@backend_method |
|
350 |
def delete_container(self, user, account, container, until=None): |
|
351 |
"""Delete/purge the container with the given name.""" |
|
352 |
|
|
353 |
logger.debug("delete_container: %s %s %s", account, container, until) |
|
354 |
if user != account: |
|
355 |
raise NotAllowedError |
|
356 |
path, version_id, mtime = self._get_containerinfo(account, container) |
|
357 |
|
|
358 |
if until is not None: |
|
359 |
sql = '''select version_id from versions where name like ? and tstamp <= ? |
|
360 |
and version_id not in (select version_id from (%s))''' |
|
361 |
sql = sql % self._sql_until() # Do not delete current versions. |
|
362 |
c = self.con.execute(sql, (path + '/%', until)) |
|
363 |
for v in [x[0] for x in c.fetchall()]: |
|
364 |
self._del_version(v) |
|
365 |
return |
|
366 |
|
|
367 |
count = self._get_pathstats(path)[0] |
|
368 |
if count > 0: |
|
369 |
raise IndexError('Container is not empty') |
|
370 |
sql = 'delete from versions where name = ? or name like ?' # May contain hidden items. |
Also available in: Unified diff