root / snf-pithos-backend / pithos / backends / lib / hashfiler / context_archipelago.py @ f75f40cb

# Copyright 2013 GRNET S.A. All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
#   1. Redistributions of source code must retain the above
#      copyright notice, this list of conditions and the following
#      disclaimer.
#
#   2. Redistributions in binary form must reproduce the above
#      copyright notice, this list of conditions and the following
#      disclaimer in the documentation and/or other materials
#      provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
# or implied, of GRNET S.A.

from os import SEEK_CUR, SEEK_SET
from archipelago.common import (
    Request,
    string_at,
)
from pithos.workers import monkey

# Patch archipelago's Request class before it is used below.
monkey.patch_Request()

# Cached buffer of zero bytes; grown or trimmed on demand by zeros().
_zeros = ''


def zeros(nr):
    """Return a string of nr zero bytes, reusing the cached buffer."""
    global _zeros
    size = len(_zeros)
    if nr == size:
        return _zeros

    if nr > size:
        _zeros += '\0' * (nr - size)
        return _zeros

    if nr < size:
        _zeros = _zeros[:nr]
        return _zeros
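

# Illustrative note (not part of the original module): zeros() trades a
# little memory for fewer allocations by caching the zero buffer, e.g.
#
#     zeros(4)    # -> '\x00\x00\x00\x00', cache grows to 4 bytes
#     zeros(2)    # -> '\x00\x00', cache is trimmed back to 2 bytes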


def file_sync_write_chunks(archipelagoobject, chunksize, offset,
                           chunks, size=None):
    """Write the given chunks to the given Archipelago object.

    Writes never span across chunk boundaries.
    If size is given, stop after or pad until size bytes have been written.
    """
    padding = 0
    cursize = chunksize * offset
    archipelagoobject.seek(cursize)
    for chunk in chunks:
        if padding:
            # The previous chunk was short; zero-fill up to the boundary.
            archipelagoobject.sync_write(buffer(zeros(chunksize), 0, padding))
        if size is not None and cursize + chunksize >= size:
            # Final chunk: truncate so that exactly `size` bytes are written.
            chunk = chunk[:size - cursize]
            archipelagoobject.sync_write(chunk)
            cursize += len(chunk)
            break
        archipelagoobject.sync_write(chunk)
        padding = chunksize - len(chunk)

    # Pad with zeros up to `size`, if requested.
    padding = size - cursize if size is not None else 0
    if padding <= 0:
        return

    q, r = divmod(padding, chunksize)
    for x in xrange(q):
        archipelagoobject.sync_write(zeros(chunksize))
    archipelagoobject.sync_write(buffer(zeros(chunksize), 0, r))
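

# Illustrative sketch (not part of the original module): how
# file_sync_write_chunks keeps writes within chunk boundaries.
# `_MemObject` is a hypothetical in-memory stand-in exposing the same
# seek()/sync_write() interface, used only to make the layout visible:
#
#     class _MemObject(object):
#         def __init__(self):
#             self.buf, self.pos = bytearray(), 0
#         def seek(self, offset, whence=SEEK_SET):
#             self.pos = offset
#         def sync_write(self, data):
#             data = str(data)                # flatten buffer objects
#             self.buf[self.pos:self.pos + len(data)] = data
#             self.pos += len(data)
#
#     obj = _MemObject()
#     file_sync_write_chunks(obj, 4, 0, ['ab', 'cd'])
#     # obj.buf == 'ab\x00\x00cd': the short chunk 'ab' is zero-padded
#     # to the 4-byte boundary before 'cd' is written.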


def file_sync_read_chunks(archipelagoobject, chunksize, nr, offset=0):
    """Read and yield up to nr chunks from an Archipelago object at offset.

    Reads never span across chunksize boundaries.
    """
    archipelagoobject.seek(offset * chunksize)
    while nr:
        remains = chunksize
        chunk = ''
        while True:
            s = archipelagoobject.sync_read(remains)
            if not s:
                # No more data: yield whatever was gathered and stop.
                if chunk:
                    yield chunk
                return
            chunk += s
            remains -= len(s)
            if remains <= 0:
                break
        yield chunk
        nr -= 1
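

# Illustrative usage (not part of the original module): stream an object
# in chunksize pieces; iteration ends early if the backend returns no
# more data.  `obj` and `process` are hypothetical:
#
#     for chunk in file_sync_read_chunks(obj, 4096, 16, offset=0):
#         process(chunk)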


class ArchipelagoObject(object):
    __slots__ = ("name", "ioctx_pool", "dst_port", "create", "offset")

    def __init__(self, name, ioctx_pool, dst_port=None, create=0):
        self.name = name
        self.ioctx_pool = ioctx_pool
        self.create = create
        self.dst_port = dst_port
        self.offset = 0

    def __enter__(self):
        return self

    def __exit__(self, exc, arg, trace):
        return False

    def seek(self, offset, whence=SEEK_SET):
        if whence == SEEK_CUR:
            offset += self.offset
        self.offset = offset
        return offset

    def tell(self):
        return self.offset

    def truncate(self, size):
        raise NotImplementedError(
            "File truncation is not implemented yet in archipelago")

    def sync_write(self, data):
        # Issue a synchronous write request through a pooled I/O context.
        ioctx = self.ioctx_pool.pool_get()
        req = Request.get_write_request(ioctx, self.dst_port, self.name,
                                        data=data, offset=self.offset,
                                        datalen=len(data))
        req.submit()
        req.wait()
        ret = req.success()
        req.put()
        self.ioctx_pool.pool_put(ioctx)
        if ret:
            self.offset += len(data)
        else:
            raise IOError("archipelago: Write request error")

    def sync_write_chunks(self, chunksize, offset, chunks, size=None):
        return file_sync_write_chunks(self, chunksize, offset, chunks, size)

    def sync_read(self, size):
        # Keep issuing read requests until `size` bytes have been gathered
        # or the backend returns no data.
        read = Request.get_read_request
        data = ''
        datalen = 0
        dsize = size
        while True:
            ioctx = self.ioctx_pool.pool_get()
            req = read(ioctx, self.dst_port,
                       self.name, size=dsize - datalen, offset=self.offset)
            req.submit()
            req.wait()
            ret = req.success()
            if ret:
                s = string_at(req.get_data(), dsize - datalen)
            else:
                s = None
            req.put()
            self.ioctx_pool.pool_put(ioctx)
            if not s:
                break
            data += s
            datalen += len(s)
            self.offset += len(s)
            if datalen >= size:
                break
        return data

    def sync_read_chunks(self, chunksize, nr, offset=0):
        return file_sync_read_chunks(self, chunksize, nr, offset)
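

# Illustrative usage sketch (not part of the original module).  The pool
# factory, object name and destination port below are hypothetical; in
# Pithos these are normally supplied by the Archipelago backend
# configuration:
#
#     pool = get_ioctx_pool()                       # hypothetical factory
#     with ArchipelagoObject("myobject", pool, dst_port=1001) as obj:
#         obj.sync_write_chunks(4 * 1024 * 1024, 0, [block])
#         for chunk in obj.sync_read_chunks(4 * 1024 * 1024, 1):
#             digest(chunk)                         # hypothetical consumer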