
xseg/xtypes/xcache.c @ revision c79b0be5


/*
 * Copyright 2013 GRNET S.A. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *   1. Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *   2. Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and
 * documentation are those of the authors and should not be
 * interpreted as representing official policies, either expressed
 * or implied, of GRNET S.A.
 */

#include <xtypes/xcache.h>
/* TODO: make the cache container aware and use xptrs */

/*
static struct xcache_entry * get_cache_entry(struct xcache *cache, xqindex idx)
{
	return &cache->entries[idx];
}
*/

    
static xqindex __get_cache_idx(struct xcache *cache, struct xcache_entry *ce)
{
	return (xqindex)(ce - cache->nodes);
}

static xqindex __alloc_cache_entry(struct xcache *cache)
{
	return __xq_pop_head(&cache->free_nodes);
}

static xqindex alloc_cache_entry(struct xcache *cache)
{
	return xq_pop_head(&cache->free_nodes, 1);
}

static void __free_cache_entry(struct xcache *cache, xqindex idx)
{
	__xq_append_head(&cache->free_nodes, idx);
}

static void free_cache_entry(struct xcache *cache, xqindex idx)
{
	xq_append_head(&cache->free_nodes, idx, 1);
}

static void xcache_entry_get(struct xcache *cache, xqindex idx)
{
	struct xcache_entry *ce = &cache->nodes[idx];
	ce->ref++;
	cache->times[idx] = cache->time++;
}

static void xcache_entry_put(struct xcache *cache, xqindex idx)
{
	struct xcache_entry *ce = &cache->nodes[idx];
	ce->ref--;
	if (!ce->ref){
		if (cache->ops.on_put)
			cache->ops.on_put(cache->priv, ce->priv);
		free_cache_entry(cache, idx);
	}
}
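
/*
 * Reference counting in a nutshell: xcache_entry_get() bumps an entry's
 * refcount and stamps it with the cache-wide logical clock, so gets double
 * as LRU touches; xcache_entry_put() drops the refcount and, when it hits
 * zero, runs the user's on_put hook and recycles the node. Every get must
 * therefore be paired with exactly one put, along these lines (an
 * illustrative sketch only, not code from this file):
 *
 *	xcache_entry_get(cache, idx);
 *	... work with cache->nodes[idx] ...
 *	xcache_entry_put(cache, idx);
 */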

    
/* Initialize a fresh cache entry: reset its lock and refcount, copy in the
 * NUL-terminated (and possibly truncated) target name, run the user's
 * on_init hook if one is set, and take the initial reference. */
static int xcache_entry_init(struct xcache *cache, xqindex idx, char *name)
{
	struct xcache_entry *ce = &cache->nodes[idx];
	xlock_release(&ce->lock);
	ce->ref = 0;
	strncpy(ce->name, name, XSEG_MAX_TARGETLEN);
	ce->name[XSEG_MAX_TARGETLEN] = 0;
	if (cache->ops.on_init)
		if (cache->ops.on_init(cache->priv, ce->priv) < 0)
			return -1;
	xcache_entry_get(cache, idx);
	return 0;
}

    
/* Pick the least recently used entry with a linear scan over all node
 * timestamps. Free or removed nodes have their timestamp set to
 * (uint64_t)-1, so starting the scan at -2 keeps them from ever being
 * selected. */
static xqindex __xcache_lru(struct xcache *cache)
{
	uint64_t min = -2;
	xqindex i, lru = Noneidx;
	for (i = 0; i < cache->nr_nodes; i++) {
		if (min > cache->times[i]){
			min = cache->times[i];
			lru = i;
		}
	}
	return lru;
}

    
/* Look a name up in the hash; on a hit, take a reference on the entry
 * (which also refreshes its LRU timestamp) before returning its index. */
static xqindex __xcache_lookup(struct xcache *cache, char *name)
{
	xqindex xqi = Noneidx;
	if (xhash_lookup(cache->entries, (xhashidx)name, &xqi) < 0){
		return Noneidx;
	}
	xcache_entry_get(cache, xqi);
	return xqi;
}

    
static int __xcache_remove(struct xcache *cache, xcache_handler h)
{
	int r;
	xqindex idx = (xqindex)h;
	struct xcache_entry *ce = &cache->nodes[idx];

	r = xhash_delete(cache->entries, (xhashidx)ce->name);
	if (r < 0)
		return r;
	/* Drop the cache's reference here, so that the last holder of a
	 * reference to the entry frees the node. */
	cache->times[idx] = -1;
	xcache_entry_put(cache, idx);
	return 0;
}

    
/* Insert an initialized entry into the hash, making it visible to
 * lookups. */
static int __xcache_insert(struct xcache *cache, xcache_handler h)
{
	int r;
	struct xcache_entry *ce;
	xqindex lru, idx;
	idx = (xqindex)h;
	ce = &cache->nodes[idx];
	/* TODO: look the name up first, to ensure we don't overwrite an
	 * existing entry */
retry:
	r = xhash_insert(cache->entries, (xhashidx)ce->name, idx);
	if (r == -XHASH_ENOSPC){
		lru = __xcache_lru(cache);
		/* TODO: assert lru != Noneidx and validate lru */
		if (__xcache_remove(cache, lru) < 0)
			return -1;
		goto retry;
	}
	if (r == -XHASH_ERESIZE){
		xhash_t *new = xhash_resize(cache->entries,
				cache->entries->size_shift,
				cache->entries->limit, NULL);
		if (!new)
			return -1;
		cache->entries = new;
		goto retry;
	}

	return r;
}
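
/*
 * A note on the two retry paths above: -XHASH_ENOSPC means the table is
 * full, so the least recently used entry is evicted to make room before
 * retrying; -XHASH_ERESIZE means the hash wants to grow into a larger
 * table, so it is resized and the insert retried. Eviction may repeat
 * until the insert finally succeeds or an eviction fails.
 */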

    
xcache_handler xcache_lookup(struct xcache *cache, char *name)
{
	xqindex xqi = Noneidx;
	xlock_acquire(&cache->lock, 1);
	xqi = __xcache_lookup(cache, name);
	xlock_release(&cache->lock);
	return (xcache_handler)xqi;
}

    
xcache_handler xcache_alloc_init(struct xcache *cache, char *name)
{
	xqindex idx = alloc_cache_entry(cache);
	if (idx == Noneidx)
		return idx;
	if (xcache_entry_init(cache, idx, name) < 0){
		free_cache_entry(cache, idx);
		idx = Noneidx;
	}
	return (xcache_handler)idx;
}

    
int xcache_init(struct xcache *cache, uint32_t xcache_size,
		struct xcache_ops *ops, void *priv)
{
	struct xcache_entry *ce;
	unsigned long i;
	xhashidx shift;

	/* Find the smallest shift with (1UL << shift) >= xcache_size, then
	 * add 3, presumably to give the hash table headroom before it has
	 * to resize. */
	for (shift = 0; xcache_size > (1UL << shift); shift++) {}
	shift += 3;

	xlock_release(&cache->lock);
	cache->size = xcache_size;
	cache->nr_nodes = cache->size * 2;
	cache->time = 0;
	cache->ops = *ops;
	cache->priv = priv;

	/* NOTE: allocations made by the earlier steps below are not
	 * released if a later step fails. */
	if (!xq_alloc_seq(&cache->free_nodes, cache->nr_nodes,
				cache->nr_nodes)){
		return -1;
	}
	cache->entries = xhash_new(shift, xcache_size, STRING);
	if (!cache->entries){
		return -1;
	}
	cache->nodes = xtypes_malloc(cache->nr_nodes *
			sizeof(struct xcache_entry));
	if (!cache->nodes){
		return -1;
	}
	cache->times = xtypes_malloc(cache->nr_nodes * sizeof(uint64_t));
	if (!cache->times){
		return -1;
	}
	for (i = 0; i < cache->nr_nodes; i++) {
		cache->times[i] = -1; /* so the LRU scan never picks a free node */
	}

	if (cache->ops.on_node_init){
		for (i = 0; i < cache->nr_nodes; i++) {
			ce = &cache->nodes[i];
			ce->priv = cache->ops.on_node_init(cache->priv);
		}
	}

	return 0;
}
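
/*
 * Sizing example (illustrative): with xcache_size = 1000, the shift loop
 * in xcache_init() stops at shift = 10 (1UL << 10 = 1024 >= 1000), so the
 * hash is created with a size shift of 13, backed by nr_nodes = 2000
 * entry slots and timestamps.
 */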

    
int xcache_insert(struct xcache *cache, xcache_handler h)
{
	int r;
	xlock_acquire(&cache->lock, 1);
	r = __xcache_insert(cache, h);
	xlock_release(&cache->lock);
	return r;
}

    
int xcache_remove(struct xcache *cache, xcache_handler h)
{
	int r;
	xlock_acquire(&cache->lock, 1);
	r = __xcache_remove(cache, h);
	xlock_release(&cache->lock);
	return r;
}

    
int xcache_invalidate(struct xcache *cache, char *name)
{
	xqindex xqi;
	int r = 0;
	xlock_acquire(&cache->lock, 1);
	if (xhash_lookup(cache->entries, (xhashidx)name, &xqi) >= 0){
		/* TODO: validate xqi */
		r = __xcache_remove(cache, xqi);
	}
	xlock_release(&cache->lock);
	return r;
}

    
void xcache_put(struct xcache *cache, xcache_handler h)
{
	xqindex idx = (xqindex)h;
	xcache_entry_put(cache, idx);
}

    
void xcache_close(struct xcache *cache)
{
	return;
}
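
/*
 * Illustrative usage sketch. Everything inside the #if 0 block is an
 * assumption made for the example's sake (the my_* callbacks, their exact
 * signatures, and the "target0" name are made up); it only shows the
 * intended call sequence: look up first, on a miss allocate, initialize
 * and insert, and put the handler when done with it.
 */
#if 0
static int my_on_init(void *cache_priv, void *entry_priv) { return 0; }
static void my_on_put(void *cache_priv, void *entry_priv) { }

static int example(void)
{
	struct xcache cache;
	struct xcache_ops ops = { 0 };
	xcache_handler h;

	ops.on_init = my_on_init;
	ops.on_put = my_on_put;
	if (xcache_init(&cache, 1024, &ops, NULL) < 0)
		return -1;

	h = xcache_lookup(&cache, "target0");
	if (h == Noneidx){
		h = xcache_alloc_init(&cache, "target0");
		if (h == Noneidx)
			return -1;
		if (xcache_insert(&cache, h) < 0)
			return -1;
	}
	/* ... use cache.nodes[(xqindex)h] ... */
	xcache_put(&cache, h);
	return 0;
}
#endif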

    
#ifdef __KERNEL__
#include <linux/module.h>
#include <xtypes/xcache_exports.h>
#endif