xseg/xtypes/xcache.c @ 246214a1
/*
 * Copyright 2013 GRNET S.A. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * 1. Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and
 * documentation are those of the authors and should not be
 * interpreted as representing official policies, either expressed
 * or implied, of GRNET S.A.
 */

#include <xtypes/xcache.h>
//TODO container aware and xptrs

/*
static struct xcache_entry * get_cache_entry(struct xcache *cache, xqindex idx)
{
	return &cache->entries[idx];
}
*/

static xqindex __get_cache_idx(struct xcache *cache, struct xcache_entry *ce)
{
	return (xqindex)(ce - cache->nodes);
}

static xqindex __alloc_cache_entry(struct xcache *cache)
{
	return __xq_pop_head(&cache->free_nodes);
}

static xqindex alloc_cache_entry(struct xcache *cache)
{
	return xq_pop_head(&cache->free_nodes, 1);
}

static void __free_cache_entry(struct xcache *cache, xqindex idx)
{
	__xq_append_head(&cache->free_nodes, idx);
}

static void free_cache_entry(struct xcache *cache, xqindex idx)
{
	xq_append_head(&cache->free_nodes, idx, 1);
}

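/*
 * Note (inferred from the current callers in this file): xcache_entry_get,
 * the only caller of __update_access_time, runs with cache->lock held, so
 * the plain, non-atomic increment of cache->time below is safe; the atomic
 * variant is kept commented out for reference.
 */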
static void __update_access_time(struct xcache *cache, xqindex idx)
{
	// cache->times[idx] = __sync_add_and_fetch(&cache->time, 1);
	cache->times[idx] = cache->time++;
	//XSEGLOG("cache->times[%llu] = %llu", idx, cache->times[idx]);
}

/*
 * Proper precautions should be taken when getting a cache entry:
 * there is a race between removing an entry and finding the LRU entry.
 */
static void xcache_entry_get(struct xcache *cache, xqindex idx)
{
	struct xcache_entry *ce = &cache->nodes[idx];
	__sync_add_and_fetch(&ce->ref, 1);
	__update_access_time(cache, idx);
}

/* put should be called with no locking */
static void xcache_entry_put(struct xcache *cache, xqindex idx)
{
	struct xcache_entry *ce = &cache->nodes[idx];
	unsigned long ref = __sync_sub_and_fetch(&ce->ref, 1);
	if (!ref){
		if (cache->ops.on_put)
			cache->ops.on_put(cache->priv, ce->priv);
		free_cache_entry(cache, idx);
	}
}

static int xcache_entry_init(struct xcache *cache, xqindex idx, char *name)
{
	struct xcache_entry *ce = &cache->nodes[idx];
	xlock_release(&ce->lock);
	ce->ref = 1;
	strncpy(ce->name, name, XSEG_MAX_TARGETLEN);
	ce->name[XSEG_MAX_TARGETLEN] = 0;
	if (cache->ops.on_init)
		if (cache->ops.on_init(cache->priv, ce->priv) < 0)
			return -1;
	return 0;
}

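/*
 * Linear scan for the least recently used entry. Free or removed slots
 * carry an access time of (uint64_t)-1, while min starts at (uint64_t)-2,
 * so such slots can never be selected as the LRU victim.
 */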
static xqindex __xcache_lru(struct xcache *cache)
{
	uint64_t min = -2;
	xqindex i, lru = Noneidx;
	for (i = 0; i < cache->nr_nodes; i++) {
		//XSEGLOG("cache->times[%llu] = %llu", i, cache->times[i]);
		if (min > cache->times[i]){
			min = cache->times[i];
			lru = i;
		}
	}
	//XSEGLOG("Found lru cache->times[%llu] = %llu", lru, cache->times[lru]);
	return lru;
}

static xqindex __xcache_lookup(struct xcache *cache, char *name)
{
	xqindex xqi = Noneidx;
	if (xhash_lookup(cache->entries, (xhashidx)name, &xqi) < 0){
		return Noneidx;
	}
	xcache_entry_get(cache, xqi);
	return xqi;
}

/* after a successful call, the handler must be put */
static int __xcache_remove(struct xcache *cache, xcache_handler h)
{
	int r;
	xqindex idx = (xqindex)h;
	struct xcache_entry *ce = &cache->nodes[idx];

	r = xhash_delete(cache->entries, (xhashidx)ce->name);
	if (r < 0){
		//XSEGLOG("Couldn't delete cache entry(h: %llu, cache->nodes[h].priv: %p",
		//		h, cache->nodes[idx].priv);
		return r;
	}
	cache->times[idx] = -1;
	//XSEGLOG("cache->times[%llu] = %llu", idx, cache->times[idx]);
	return 0;
}

/*
 * Inserts a new entry into the cache, or returns a different entry with
 * the same key (name).
 * In either case, the ref count of the returned entry is increased.
 */
static xcache_handler __xcache_insert(struct xcache *cache, xcache_handler h)
{
	int r;
	struct xcache_entry *ce;
	xqindex lru, idx, xqi;
	idx = (xqindex)h;
	ce = &cache->nodes[idx];

	/* lookup first to ensure we don't overwrite entries */
	if (xhash_lookup(cache->entries, (xhashidx)ce->name, &xqi) >= 0){
		xcache_entry_get(cache, xqi);
		return xqi;
	}

	/* insert new entry to cache */
retry:
	r = xhash_insert(cache->entries, (xhashidx)ce->name, idx);
	if (r == -XHASH_ENOSPC){
		/* evict the least recently used entry to make room */
		lru = __xcache_lru(cache);
		//assert lru != Noneidx
		//validate lru
		if (__xcache_remove(cache, lru) < 0)
			return NoEntry;
		xcache_entry_put(cache, lru);
		goto retry;
	}
	if (r == -XHASH_ERESIZE){
		XSEGLOG("Rebuilding internal hash table");
		xhash_t *new = xhash_resize(cache->entries,
				cache->entries->size_shift,
				cache->entries->limit, NULL);
		if (!new)
			return NoEntry;
		cache->entries = new;
		goto retry;
	}

	if (r >= 0){
		xcache_entry_get(cache, idx);
	}

	return ((r < 0) ? NoEntry : h);
}

xcache_handler xcache_lookup(struct xcache *cache, char *name)
{
	xqindex xqi = Noneidx;
	xlock_acquire(&cache->lock, 1);
	xqi = __xcache_lookup(cache, name);
	xlock_release(&cache->lock);
	return (xcache_handler)xqi;
}

xcache_handler xcache_alloc_init(struct xcache *cache, char *name)
{
	xqindex idx = alloc_cache_entry(cache);
	if (idx == Noneidx)
		return idx;
	if (xcache_entry_init(cache, idx, name) < 0){
		free_cache_entry(cache, idx);
		idx = Noneidx;
	}
	return (xcache_handler)idx;
}

int xcache_init(struct xcache *cache, uint32_t xcache_size,
		struct xcache_ops *ops, void *priv)
{
	struct xcache_entry *ce;
	unsigned long i;
	xhashidx shift;

	/*
	 * Here we choose a proper size for the hash table.
	 * It must be able to contain at least xcache_size elements before
	 * returning -XHASH_ERESIZE.
	 * Thus it must be at least 3/2 * xcache_size (or, to be exact, the
	 * minimum power of two that meets this requirement).
	 *
	 * By choosing an xhash size 8 times the minimum required, we
	 * drastically decrease the number of xhash rebuilds, for performance
	 * reasons, sacrificing a reasonable amount of memory.
	 */

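	/*
	 * For example: with xcache_size = 1024, 3*1024/2 = 1536, so the loop
	 * below exits at shift = 11 (1UL << 11 = 2048 > 1536); shift += 3
	 * then gives 14, i.e. an xhash of 16384 slots.
	 */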
	for (shift = 0; 3*xcache_size/2 >= (1UL << shift); shift++) {}
	shift += 3;

	xlock_release(&cache->lock);
	cache->size = xcache_size;
	cache->nr_nodes = cache->size * 2;
	cache->time = 0;
	cache->ops = *ops;
	cache->priv = priv;

	if (!xq_alloc_seq(&cache->free_nodes, cache->nr_nodes,
				cache->nr_nodes)){
		return -1;
	}
	cache->entries = xhash_new(shift, xcache_size, STRING);
	if (!cache->entries){
		return -1;
	}
	cache->nodes = xtypes_malloc(cache->nr_nodes * sizeof(struct xcache_entry));
	if (!cache->nodes){
		return -1;
	}
	cache->times = xtypes_malloc(cache->nr_nodes * sizeof(uint64_t));
	if (!cache->times){
		return -1;
	}
	for (i = 0; i < cache->nr_nodes; i++) {
		cache->times[i] = -1; //so lru will never return a -1 value
	}

	if (cache->ops.on_node_init){
		for (i = 0; i < cache->nr_nodes; i++) {
			ce = &cache->nodes[i];
			ce->priv = cache->ops.on_node_init(cache->priv);
			if (!ce->priv)
				return -1;
		}
	}

	return 0;
}

xcache_handler xcache_insert(struct xcache *cache, xcache_handler h)
{
	xcache_handler ret;
	xlock_acquire(&cache->lock, 1);
	ret = __xcache_insert(cache, h);
	xlock_release(&cache->lock);
	return ret;
}

int xcache_remove(struct xcache *cache, xcache_handler h)
{
	int r;
	xlock_acquire(&cache->lock, 1);
	r = __xcache_remove(cache, h);
	xlock_release(&cache->lock);
	if (r >= 0)
		xcache_entry_put(cache, h);
	return r;
}

int xcache_invalidate(struct xcache *cache, char *name)
{
	xqindex xqi;
	int r = 0;
	xlock_acquire(&cache->lock, 1);
	if (xhash_lookup(cache->entries, (xhashidx)name, &xqi) >= 0){
		//validate xqi
		r = __xcache_remove(cache, xqi);
		xlock_release(&cache->lock);
		if (r >= 0)
			xcache_entry_put(cache, xqi);
	} else {
		xlock_release(&cache->lock);
	}
	return r;
}

void xcache_put(struct xcache *cache, xcache_handler h)
{
	xqindex idx = (xqindex)h;
	//xlock_acquire(&cache->lock, 1);
	xcache_entry_put(cache, idx);
	//xlock_release(&cache->lock);
}

/*
 * Put all cache entries.
 * Does not free cache resources; an xcache_free should be implemented
 * for that purpose, to be called by the client once all entries have
 * been put.
 */
void xcache_close(struct xcache *cache)
{
	unsigned long i;
	xlock_acquire(&cache->lock, 1);
	for (i = 0; i < cache->nr_nodes; i++) {
		if (cache->times[i] == -1)
			continue;
		xcache_entry_put(cache, (xqindex)i);
	}
	xlock_release(&cache->lock);
}
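
/*
 * Typical client flow (illustrative sketch, error paths simplified):
 *
 *	xcache_handler h = xcache_lookup(cache, name);
 *	if (h == (xcache_handler)Noneidx) {
 *		h = xcache_alloc_init(cache, name);
 *		if (h == (xcache_handler)Noneidx)
 *			return;
 *		h = xcache_insert(cache, h);
 *		if (h == NoEntry)
 *			return;
 *	}
 *	...use the entry, then drop the reference...
 *	xcache_put(cache, h);
 *
 * Note that xcache_insert may return a handler other than the one passed
 * in, when an entry with the same name is already cached.
 */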

#ifdef __KERNEL__
#include <linux/module.h>
#include <xtypes/xcache_exports.h>
#endif