Statistics
| Branch: | Tag: | Revision:

root / pithos / backends / lib / hashfiler / context_file.py @ a9b3f29d

History | View | Annotate | Download (5.4 kB)

1
# Copyright 2011 GRNET S.A. All rights reserved.
2
# 
3
# Redistribution and use in source and binary forms, with or
4
# without modification, are permitted provided that the following
5
# conditions are met:
6
# 
7
#   1. Redistributions of source code must retain the above
8
#      copyright notice, this list of conditions and the following
9
#      disclaimer.
10
# 
11
#   2. Redistributions in binary form must reproduce the above
12
#      copyright notice, this list of conditions and the following
13
#      disclaimer in the documentation and/or other materials
14
#      provided with the distribution.
15
# 
16
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
17
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
20
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
24
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
26
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27
# POSSIBILITY OF SUCH DAMAGE.
28
# 
29
# The views and conclusions contained in the software and
30
# documentation are those of the authors and should not be
31
# interpreted as representing official policies, either expressed
32
# or implied, of GRNET S.A.
33

    
34
from os import SEEK_CUR, SEEK_SET, fsync
35
from errno import ENOENT
36

    
37

    
38
_zeros = ''


def zeros(nr):
    """Return a string of exactly nr zero bytes.

    The result is backed by a module-level cache that is grown or
    trimmed to nr bytes, so repeated calls with the same size reuse
    the same string object instead of reallocating it.
    """
    global _zeros
    cached = len(_zeros)
    if nr > cached:
        # Grow the cache up to the requested length.
        _zeros += '\0' * (nr - cached)
    elif nr < cached:
        # Trim the cache down to the requested length.
        _zeros = _zeros[:nr]
    return _zeros
54

    
55

    
56
def file_sync_write_chunks(openfile, chunksize, offset, chunks, size=None):
    """Write the given chunks to the given buffered file object.

    Writing starts at chunk boundary `offset` (byte offset * chunksize)
    and never spans across chunk boundaries: a short chunk is padded
    with zero bytes up to the next boundary before the following chunk
    is written. If `size` is given, stop after -- or pad until -- `size`
    bytes have been written in total.

    NOTE(review): despite the module's naming, no fsync is performed
    here; durability is the caller's responsibility.
    """
    fwrite = openfile.write
    seek = openfile.seek
    padding = 0

    try:
        seek(offset * chunksize)
    except IOError:
        # Unseekable stream: emulate the seek by writing zero chunks.
        seek = None
        for _ in xrange(offset):
            fwrite(zeros(chunksize))

    cursize = offset * chunksize

    for chunk in chunks:
        if padding:
            if seek:
                # Skip the gap and write a single byte so the file length
                # covers it (the filesystem reads the hole back as zeros).
                seek(padding - 1, SEEK_CUR)
                fwrite("\x00")
            else:
                fwrite(buffer(zeros(chunksize), 0, padding))
        if size is not None and cursize + chunksize >= size:
            # Final (possibly partial) chunk: keep only the remaining
            # bytes. (Was chunk[:chunksize - (cursize - size)], whose
            # bound always exceeded len(chunk), so nothing was ever
            # truncated and up to chunksize extra bytes were written.)
            chunk = chunk[:size - cursize]
            fwrite(chunk)
            cursize += len(chunk)
            break
        fwrite(chunk)
        # Track bytes written so the size check above and the final
        # padding below see the real position. (Was missing: cursize
        # stayed at offset * chunksize for the whole loop.)
        # NOTE(review): boundary padding is not counted here; this
        # assumes only the final chunk may be short -- confirm callers.
        cursize += len(chunk)
        padding = chunksize - len(chunk)

    padding = size - cursize if size is not None else 0
    if padding <= 0:
        return

    # Pad with zero chunks until `size` bytes have been written.
    q, r = divmod(padding, chunksize)
    for _ in xrange(q):
        fwrite(zeros(chunksize))
    fwrite(buffer(zeros(chunksize), 0, r))
97

    
98

    
99
def file_sync_read_chunks(openfile, chunksize, nr, offset=0):
    """Read and yield up to nr chunks from a buffered file object.

    Reading starts at chunk boundary `offset` and never spans across
    chunksize boundaries. The final chunk may be shorter if the file
    ends mid-chunk; iteration stops at EOF or after nr chunks.
    """
    fread = openfile.read
    remains = offset * chunksize
    seek = openfile.seek
    try:
        seek(remains)
    except IOError:
        # Unseekable stream: consume and discard the skipped prefix.
        seek = None
        while remains > 0:
            s = fread(remains)
            if not s:
                # EOF before reaching the requested offset: nothing to
                # yield. (Was an unconditional `while 1` that never
                # decremented `remains` on an empty read -- infinite loop.)
                return
            remains -= len(s)

    while nr:
        remains = chunksize
        chunk = ''
        while 1:
            s = fread(remains)
            if not s:
                # EOF: yield any partial chunk collected so far.
                if chunk:
                    yield chunk
                return
            chunk += s
            remains -= len(s)
            if remains <= 0:
                break
        yield chunk
        nr -= 1
131

    
132

    
133
class ContextFile(object):
    """Context-managed binary file wrapper for chunked I/O.

    Opens `name` on __enter__ (optionally creating it when missing) and
    closes it on __exit__; exceptions raised inside the with-block are
    propagated.

    NOTE(review): despite their names, the sync_* methods never call
    fsync(); durability is left to the caller.
    """

    __slots__ = ("name", "fdesc", "create")

    def __init__(self, name, create=0):
        self.name = name        # path to open on __enter__
        self.fdesc = None       # underlying file object while entered
        self.create = create    # if true, create the file when missing

    def __enter__(self):
        name = self.name
        try:
            fdesc = open(name, 'rb+')
        except IOError as e:
            # Only create when asked to, and only for a missing file;
            # any other I/O error propagates.
            if not self.create or e.errno != ENOENT:
                raise
            # Binary mode, for consistency with the 'rb+' branch above
            # (was 'w+', which opened the newly created file in text mode).
            fdesc = open(name, 'wb+')

        self.fdesc = fdesc
        return self

    def __exit__(self, exc, arg, trace):
        fdesc = self.fdesc
        if fdesc is not None:
            fdesc.close()
            self.fdesc = None  # drop the closed handle
        return False  # propagate exceptions from the with-block

    def seek(self, offset, whence=SEEK_SET):
        """Seek the underlying file; returns whatever file.seek returns."""
        return self.fdesc.seek(offset, whence)

    def tell(self):
        """Return the current position in the underlying file."""
        return self.fdesc.tell()

    def truncate(self, size):
        """Truncate the underlying file to `size` bytes."""
        self.fdesc.truncate(size)

    def sync_write(self, data):
        """Write data at the current position (no fsync is performed)."""
        self.fdesc.write(data)

    def sync_write_chunks(self, chunksize, offset, chunks, size=None):
        """Write chunks at chunk boundary `offset`; pad/stop at `size`."""
        return file_sync_write_chunks(self.fdesc, chunksize, offset, chunks,
                                      size)

    def sync_read(self, size):
        """Read from the current position to EOF, `size` bytes at a time.

        NOTE(review): returns everything up to EOF, not just `size`
        bytes -- `size` only controls the read granularity.
        """
        read = self.fdesc.read
        data = b''  # bytes accumulator ('' == b'' in py2; file is binary)
        while 1:
            s = read(size)
            if not s:
                break
            data += s
        return data

    def sync_read_chunks(self, chunksize, nr, offset=0):
        """Yield up to nr chunks of chunksize bytes starting at `offset`."""
        return file_sync_read_chunks(self.fdesc, chunksize, nr, offset)
191