root / xseg / xtypes / xcache.c @ b4832a42
History | View | Annotate | Download (11.4 kB)
1 |
/*
|
---|---|
2 |
* Copyright 2013 GRNET S.A. All rights reserved.
|
3 |
*
|
4 |
* Redistribution and use in source and binary forms, with or
|
5 |
* without modification, are permitted provided that the following
|
6 |
* conditions are met:
|
7 |
*
|
8 |
* 1. Redistributions of source code must retain the above
|
9 |
* copyright notice, this list of conditions and the following
|
10 |
* disclaimer.
|
11 |
* 2. Redistributions in binary form must reproduce the above
|
12 |
* copyright notice, this list of conditions and the following
|
13 |
* disclaimer in the documentation and/or other materials
|
14 |
* provided with the distribution.
|
15 |
*
|
16 |
* THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
|
17 |
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
18 |
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
19 |
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
|
20 |
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
21 |
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
22 |
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
|
23 |
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
|
24 |
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
25 |
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
|
26 |
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
27 |
* POSSIBILITY OF SUCH DAMAGE.
|
28 |
*
|
29 |
* The views and conclusions contained in the software and
|
30 |
* documentation are those of the authors and should not be
|
31 |
* interpreted as representing official policies, either expressed
|
32 |
* or implied, of GRNET S.A.
|
33 |
*/
|
34 |
|
35 |
#include <xtypes/xcache.h> |
36 |
//TODO container aware and xptrs
|
37 |
|
38 |
/*
|
39 |
static struct xcache_entry * get_cache_entry(struct xcache *cache, xqindex idx)
|
40 |
{
|
41 |
return &cache->entries[idx];
|
42 |
}
|
43 |
*/
|
44 |
|
45 |
/* Map a cache entry pointer back to its index within cache->nodes. */
static xqindex __get_cache_idx(struct xcache *cache, struct xcache_entry *ce)
{
	return (xqindex)(ce - cache->nodes);
}
49 |
|
50 |
/*
 * Pop a free node index off the free list, lockless variant.
 * NOTE(review): presumably the caller already serializes access to
 * cache->free_nodes — confirm against call sites.
 */
static xqindex __alloc_cache_entry(struct xcache *cache)
{
	return __xq_pop_head(&cache->free_nodes);
}
54 |
|
55 |
/* Pop a free node index off the free list (locking variant). */
static xqindex alloc_cache_entry(struct xcache *cache)
{
	return xq_pop_head(&cache->free_nodes, 1);
}
59 |
|
60 |
/* Return a node index to the free list, lockless variant. */
static void __free_cache_entry(struct xcache *cache, xqindex idx)
{
	__xq_append_head(&cache->free_nodes, idx);
}
64 |
|
65 |
/* Return a node index to the free list (locking variant). */
static void free_cache_entry(struct xcache *cache, xqindex idx)
{
	xq_append_head(&cache->free_nodes, idx, 1);
}
69 |
|
70 |
/*
|
71 |
* xbinheap should be protected by cache lock.
|
72 |
*/
|
73 |
/*
 * Stamp entry idx as most-recently-used.
 *
 * LRU_ARRAY mode: records a monotonically increasing timestamp in
 * cache->times[idx].
 * LRU_HEAP mode: bumps (or inserts) the entry's key in the min-binheap,
 * so the heap root is always the least recently used entry.
 *
 * NOTE(review): cache->time++ is not atomic (the __sync variant is
 * commented out below); presumably callers hold cache->lock, as the
 * comment above this function requires for the binheap — confirm for the
 * LRU_ARRAY path as well.
 */
static void __update_access_time(struct xcache *cache, xqindex idx)
{
	struct xcache_entry *ce = &cache->nodes[idx];
	if (cache->flags & XCACHE_LRU_ARRAY){
		//cache->times[idx] = __sync_add_and_fetch(&cache->time, 1);
		cache->times[idx] = cache->time++;
	} else if (cache->flags & XCACHE_LRU_HEAP) {
		if (ce->h != NoNode){
			/* already in the heap: just raise its key */
			xbinheap_increasekey(&cache->binheap, ce->h, cache->time++);
		} else {
			/* not in the heap (new or previously removed): insert */
			ce->h = xbinheap_insert(&cache->binheap, cache->time++, idx);
			if (ce->h == NoNode){
				XSEGLOG("BUG: Cannot insert to lru binary heap");
			}
		}
	}
	//XSEGLOG("cache->times[%llu] = %llu", idx, cache->times[idx]);
}
91 |
|
92 |
/*
|
93 |
* Proper precautions should be taken, when getting a cache entry.
|
94 |
* There is a race with removal and finding lru cache entry.
|
95 |
*/
|
96 |
/*
 * Take a reference on entry idx and mark it recently used.
 * The ref bump is atomic, but __update_access_time is not — see the race
 * warning in the comment block above this function.
 */
static void xcache_entry_get(struct xcache *cache, xqindex idx)
{
	struct xcache_entry *ce = &cache->nodes[idx];
	__sync_add_and_fetch(&ce->ref, 1);
	__update_access_time(cache, idx);
}
102 |
|
103 |
/* put should be called with no locking */
|
104 |
/*
 * Drop a reference on entry idx (atomically). When the count hits zero,
 * the user's on_put callback (if any) runs and the node returns to the
 * free list. Must be called with no locking held (see comment above).
 */
static void xcache_entry_put(struct xcache *cache, xqindex idx)
{
	struct xcache_entry *ce = &cache->nodes[idx];
	unsigned long ref = __sync_sub_and_fetch(&ce->ref, 1);
	if (!ref){
		if (cache->ops.on_put)
			cache->ops.on_put(cache->priv, ce->priv);
		free_cache_entry(cache, idx);
	}
}
114 |
|
115 |
/*
 * Initialize a freshly allocated entry: ref count 1, copied name, no
 * binheap node, plus the user's on_init hook.
 * Returns 0 on success, -1 if on_init fails (caller is expected to free
 * the entry; ref stays at 1 in that case).
 */
static int xcache_entry_init(struct xcache *cache, xqindex idx, char *name)
{
	struct xcache_entry *ce = &cache->nodes[idx];
	xlock_release(&ce->lock);
	ce->ref = 1;
	/* strncpy may not NUL-terminate; the next line always does,
	 * assuming ce->name holds XSEG_MAX_TARGETLEN + 1 bytes. */
	strncpy(ce->name, name, XSEG_MAX_TARGETLEN);
	ce->name[XSEG_MAX_TARGETLEN] = 0;
	ce->h = NoNode;
	if (cache->ops.on_init)
		if (cache->ops.on_init(cache->priv, ce->priv) < 0)
			return -1;
	return 0;
}
128 |
|
129 |
|
130 |
/*
 * Find (and, in heap mode, detach) the least recently used entry.
 * Returns its index, or Noneidx if none is eligible.
 *
 * LRU_ARRAY mode: linear scan for the smallest timestamp. min starts at
 * (uint64_t)-2, i.e. one below XCACHE_LRU_MAX, so removed entries —
 * stamped XCACHE_LRU_MAX by __xcache_remove — can never win the scan.
 * LRU_HEAP mode: O(log n) extract of the heap minimum; the entry's heap
 * handle is reset so a later access re-inserts it.
 */
static xqindex __xcache_lru(struct xcache *cache)
{
	uint64_t min = -2;
	xqindex i, lru = Noneidx;
	struct xcache_entry *ce;

	if (cache->flags & XCACHE_LRU_ARRAY){
		for (i = 0; i < cache->nr_nodes; i++) {
			//XSEGLOG("cache->times[%llu] = %llu", i, cache->times[i]);
			if (min > cache->times[i]){
				min = cache->times[i];
				lru = i;
			}
		}
		//XSEGLOG("Found lru cache->times[%llu] = %llu", lru, cache->times[lru]);
	} else if (cache->flags & XCACHE_LRU_HEAP) {
		lru = xbinheap_extract(&cache->binheap);
		if (lru == NoNode)
			return Noneidx;
		ce = &cache->nodes[lru];
		ce->h = NoNode;
	}
	return lru;
}
154 |
|
155 |
/*
 * Look up name in the hash table. On a hit, take a reference on the
 * entry and return its index; on a miss, return Noneidx.
 * Caller must hold cache->lock.
 */
static xqindex __xcache_lookup(struct xcache *cache, char *name)
{
	xqindex xqi = Noneidx;
	if (xhash_lookup(cache->entries, (xhashidx)name, &xqi) < 0){
		return Noneidx;
	}
	xcache_entry_get(cache, xqi);
	return xqi;
}
164 |
|
165 |
/* after a successful call, the handler must be put */
|
166 |
/*
 * Unlink handler h from the hash table and from the LRU structure.
 * Does NOT drop the entry's reference — after a successful call, the
 * handler must be put by the caller (without the cache lock held).
 *
 * In LRU_ARRAY mode the entry's timestamp is set to XCACHE_LRU_MAX so
 * __xcache_lru skips it; in LRU_HEAP mode the entry's key is raised to
 * the maximum and extracted, which removes exactly this node since the
 * heap is a min-heap.
 *
 * Returns 0 on success, the negative xhash_delete error otherwise.
 * Caller must hold cache->lock.
 */
static int __xcache_remove(struct xcache *cache, xcache_handler h)
{
	int r;
	xqindex idx = (xqindex)h;
	struct xcache_entry *ce = &cache->nodes[idx];

	r = xhash_delete(cache->entries, (xhashidx)ce->name);
	if (r < 0){
		//XSEGLOG("Couldn't delete cache entry(h: %llu, cache->nodes[h].priv: %p",
		//		h, cache->nodes[idx].priv);
		return r;
	}
	if (cache->flags & XCACHE_LRU_ARRAY)
		cache->times[idx] = XCACHE_LRU_MAX;
	else if (cache->flags & XCACHE_LRU_HEAP) {
		if (ce->h != NoNode) {
			if (xbinheap_increasekey(&cache->binheap, ce->h, XCACHE_LRU_MAX) < 0){
				XSEGLOG("BUG: cannot increase key to XCACHE_LRU_MAX");
			}
			if (xbinheap_extract(&cache->binheap) == NoNode){
				XSEGLOG("BUG: cannot remove cache entry from lru");
			}
			ce->h = NoNode;
		}
	}
	//XSEGLOG("cache->times[%llu] = %llu", idx, cache->times[idx]);
	return 0;
}
194 |
|
195 |
/*
|
196 |
 * Inserts a new entry to cache or returns a different entry with the same key
|
197 |
* (name).
|
198 |
* In either case, ref count on the returned entry, gets increased
|
199 |
*
|
200 |
*/
|
201 |
/*
 * Insert a new entry into the cache, or return an existing entry with
 * the same key (name). In either case the returned entry's refcount is
 * increased. On successful insertion that required an eviction,
 * *lru_handler receives the evicted entry's handler (otherwise NoEntry);
 * the caller is responsible for putting the evicted entry, without the
 * cache lock held.
 *
 * Returns the handler of the cached entry, or NoEntry on failure.
 * Caller must hold cache->lock.
 *
 * Fix: lru was previously left uninitialized, so on a first-try
 * successful insert (no -XHASH_ENOSPC path taken) the final
 * "*lru_handler = (xcache_handler)lru" read an indeterminate value —
 * undefined behavior, and a garbage eviction handler for the caller.
 * lru is now initialized to Noneidx and only published after a real
 * eviction.
 */
static xcache_handler __xcache_insert(struct xcache *cache, xcache_handler h,
					xcache_handler *lru_handler)
{
	int r;
	struct xcache_entry *ce;
	xqindex lru = Noneidx, idx, xqi;
	xhash_t *new;
	idx = (xqindex)h;
	ce = &cache->nodes[idx];

	*lru_handler = NoEntry;

	/* lookup first to ensure we don't overwrite entries */
	if (xhash_lookup(cache->entries, (xhashidx)ce->name, &xqi) >= 0){
		xcache_entry_get(cache, xqi);
		return xqi;
	}

	/* insert new entry to cache */
retry:
	r = xhash_insert(cache->entries, (xhashidx)ce->name, idx);
	if (r == -XHASH_ENOSPC){
		/* no room: evict the LRU entry and retry */
		lru = __xcache_lru(cache);
		//assert lru != Noneidx
		//validate lru
		if (__xcache_remove(cache, lru) < 0)
			return NoEntry;
		/* Cache entry is put when this function returns, without the
		 * cache lock held.
		 */
		goto retry;
	}
	if (r == -XHASH_ERESIZE){
		XSEGLOG("Rebuilding internal hash table");
		new = xhash_resize(cache->entries,
				cache->entries->size_shift,
				cache->entries->limit, NULL);
		if (!new)
			return NoEntry;
		cache->entries = new;
		goto retry;
	}

	if (r >= 0){
		xcache_entry_get(cache, idx);
		/* report the evicted entry only if an eviction happened */
		if (lru != Noneidx)
			*lru_handler = (xcache_handler)lru;
	}

	return ((r < 0) ? NoEntry : h);
}
251 |
|
252 |
xcache_handler xcache_lookup(struct xcache *cache, char *name) |
253 |
{ |
254 |
xqindex xqi = Noneidx; |
255 |
xlock_acquire(&cache->lock, 1);
|
256 |
xqi = __xcache_lookup(cache, name); |
257 |
xlock_release(&cache->lock); |
258 |
return (xcache_handler)xqi;
|
259 |
} |
260 |
|
261 |
xcache_handler xcache_alloc_init(struct xcache *cache, char *name) |
262 |
{ |
263 |
xqindex idx = alloc_cache_entry(cache); |
264 |
if (idx == Noneidx)
|
265 |
return idx;
|
266 |
if (xcache_entry_init(cache, idx, name) < 0){ |
267 |
free_cache_entry(cache, idx); |
268 |
idx = Noneidx; |
269 |
} |
270 |
return (xcache_handler)idx;
|
271 |
} |
272 |
|
273 |
int xcache_init(struct xcache *cache, uint32_t xcache_size, |
274 |
struct xcache_ops *ops, uint32_t flags, void *priv) |
275 |
{ |
276 |
struct xcache_entry *ce;
|
277 |
unsigned long i; |
278 |
xhashidx shift; |
279 |
|
280 |
/*
|
281 |
* Here we choose a proper size for the hash table.
|
282 |
* It must be able to contain at least xcache_size elements, before
|
283 |
* returns an -EXHASH_RESIZE.
|
284 |
* Thus it must be at least 3/2 * xcache_size (OK, the minimum power of
|
285 |
* two that meets this requirement to be exact).
|
286 |
*
|
287 |
* By choosing a xhash size 8 times the minimum required, we drastically
|
288 |
* decrease the number or xhash rebuilts required by xhash for
|
289 |
* perfomance reasons, sacrificing a logical amount of memory.
|
290 |
*
|
291 |
*/
|
292 |
|
293 |
for (shift = 0; 3*xcache_size/2 >= (1UL << shift); shift++) {} |
294 |
shift += 3;
|
295 |
|
296 |
xlock_release(&cache->lock); |
297 |
cache->size = xcache_size; |
298 |
cache->nr_nodes = cache->size * 2;
|
299 |
cache->time = 0;
|
300 |
cache->ops = *ops; |
301 |
cache->priv = priv; |
302 |
cache->flags = flags; |
303 |
|
304 |
if (!xq_alloc_seq(&cache->free_nodes, cache->nr_nodes,
|
305 |
cache->nr_nodes)){ |
306 |
return -1; |
307 |
} |
308 |
cache->entries = xhash_new(shift, xcache_size, STRING); |
309 |
if (!cache->entries){
|
310 |
return -1; |
311 |
} |
312 |
cache->nodes = xtypes_malloc(cache->nr_nodes * sizeof(struct xcache_entry)); |
313 |
if (!cache->nodes){
|
314 |
return -1; |
315 |
} |
316 |
if (flags & XCACHE_LRU_ARRAY){
|
317 |
cache->times = xtypes_malloc(cache->nr_nodes * sizeof(uint64_t));
|
318 |
if (!cache->times){
|
319 |
return -1; |
320 |
} |
321 |
for (i = 0; i < cache->nr_nodes; i++) { |
322 |
cache->times[i] = XCACHE_LRU_MAX; //so lru will never return a this value;
|
323 |
} |
324 |
} |
325 |
|
326 |
if (cache->ops.on_node_init){
|
327 |
for (i = 0; i < cache->nr_nodes; i++) { |
328 |
ce = &cache->nodes[i]; |
329 |
ce->priv = cache->ops.on_node_init(cache->priv); |
330 |
if (!ce->priv)
|
331 |
return -1; |
332 |
} |
333 |
} |
334 |
if (flags & XCACHE_LRU_HEAP){
|
335 |
if (xbinheap_init(&cache->binheap, xcache_size, XBINHEAP_MIN,NULL) < 0){ |
336 |
return -1; |
337 |
} |
338 |
} |
339 |
|
340 |
return 0; |
341 |
} |
342 |
|
343 |
/*
 * Insert handler h into the cache (or return an existing entry with the
 * same name, referenced). If the insertion evicted an entry, the user's
 * on_evict callback is invoked outside the cache lock; unless on_evict
 * returns non-zero (claiming the entry), the evicted entry is put here.
 *
 * Returns the cached handler, or NoEntry on failure.
 *
 * Fix: on_evict was invoked unconditionally, unlike every other ops
 * callback in this file (on_put, on_init, on_node_init), so a NULL
 * on_evict dereferenced NULL on the first eviction. It is now guarded;
 * with no callback, the evicted entry is simply put.
 */
xcache_handler xcache_insert(struct xcache *cache, xcache_handler h)
{
	xcache_handler ret, lru = NoEntry;
	struct xcache_entry *new_ce, *evicted_ce;
	int r = 0;

	xlock_acquire(&cache->lock, 1);
	ret = __xcache_insert(cache, h, &lru);
	xlock_release(&cache->lock);

	if (lru != NoEntry){
		//assert ret != h
		new_ce = &cache->nodes[h];
		evicted_ce = &cache->nodes[lru];
		if (cache->ops.on_evict)
			r = cache->ops.on_evict(cache->priv, evicted_ce->priv,
					new_ce->priv);
		if (!r){
			xcache_entry_put(cache, lru);
		}
	}

	return ret;
}
365 |
|
366 |
/*
 * Remove handler h from the cache under the cache lock, then drop the
 * cache's reference on it (outside the lock, as entry_put requires).
 * Returns the result of the internal removal (0 on success, negative on
 * error; the reference is dropped only on success).
 */
int xcache_remove(struct xcache *cache, xcache_handler h)
{
	int ret;

	xlock_acquire(&cache->lock, 1);
	ret = __xcache_remove(cache, h);
	xlock_release(&cache->lock);

	if (ret >= 0)
		xcache_entry_put(cache, h);

	return ret;
}
376 |
|
377 |
int xcache_invalidate(struct xcache *cache, char *name) |
378 |
{ |
379 |
xqindex xqi; |
380 |
int r = 0; |
381 |
xlock_acquire(&cache->lock, 1);
|
382 |
if (xhash_lookup(cache->entries, (xhashidx)name, &xqi) >= 0){ |
383 |
//validate xqi
|
384 |
r =__xcache_remove(cache, xqi); |
385 |
xlock_release(&cache->lock); |
386 |
if (r >= 0) |
387 |
xcache_entry_put(cache, xqi); |
388 |
} else {
|
389 |
xlock_release(&cache->lock); |
390 |
} |
391 |
return r;
|
392 |
} |
393 |
|
394 |
/*
 * Public wrapper around xcache_entry_put: drop a reference taken by
 * lookup/insert/alloc_init. Intentionally lockless (see the commented
 * lock calls) — entry_put must run without the cache lock held.
 */
void xcache_put(struct xcache *cache, xcache_handler h)
{
	xqindex idx = (xqindex)h;
	//xlock_acquire(&cache->lock, 1);
	xcache_entry_put(cache, idx);
	//xlock_release(&cache->lock);
}
401 |
|
402 |
/*
|
403 |
* Put all cache entries.
|
404 |
* Does not free cache resources.
|
405 |
* xcache_free should be called by the client when all entries are put.
|
406 |
*/
|
407 |
/*
 * Put all cache entries.
 * Does not free cache resources.
 * xcache_free should be called by the client when all entries are put.
 *
 * An entry is considered live when, in LRU_ARRAY mode, its timestamp is
 * not XCACHE_LRU_MAX or, in LRU_HEAP mode, it still has a binheap node.
 *
 * NOTE(review): xcache_entry_put is invoked here with cache->lock held,
 * although the comment on that function says put should be called with
 * no locking; if the final put runs on_put / free_cache_entry under the
 * lock, that may deadlock or violate the callback's contract — confirm.
 */
void xcache_close(struct xcache *cache)
{
	unsigned long i;
	struct xcache_entry *ce;
	xlock_acquire(&cache->lock, 1);
	for (i = 0; i < cache->nr_nodes; i++) {
		if (cache->flags & XCACHE_LRU_ARRAY){
			if (cache->times[i] == XCACHE_LRU_MAX)
				continue;
			xcache_entry_put(cache, (xqindex)i);
		} else if (cache->flags & XCACHE_LRU_HEAP) {
			ce = &cache->nodes[i];
			if (ce->h == NoNode)
				continue;
			xcache_entry_put(cache, (xqindex)i);
		}
	}
	xlock_release(&cache->lock);
}
426 |
|
427 |
/*
 * Release the cache's own resources: the LRU structure (binheap or
 * times array, per flags), the node array and the hash table.
 * Call only after xcache_close and after all entries have been put.
 *
 * NOTE(review): per-node priv pointers created by on_node_init are not
 * released here — presumably on_put disposed of them as refs dropped;
 * confirm against the ops contract.
 */
void xcache_free(struct xcache *cache)
{
	if (cache->flags & XCACHE_LRU_HEAP){
		xbinheap_free(&cache->binheap);
	} else if (cache->flags & XCACHE_LRU_ARRAY){
		xtypes_free(cache->times);
	}
	xtypes_free(cache->nodes);
	xhash_free(cache->entries);
}
437 |
|
438 |
#ifdef __KERNEL__
|
439 |
#include <linux/module.h> |
440 |
#include <xtypes/xcache_exports.h> |
441 |
#endif
|