Revision 246214a1: xseg/xtypes/xcache.c

--- a/xseg/xtypes/xcache.c
+++ b/xseg/xtypes/xcache.c
@@ -69,22 +69,28 @@
 
 static void __update_access_time(struct xcache *cache, xqindex idx)
 {
-	cache->times[idx] = __sync_add_and_fetch(&cache->time, 1);
-	XSEGLOG("cache->times[%llu] = %llu", idx, cache->times[idx]);
+//	cache->times[idx] = __sync_add_and_fetch(&cache->time, 1);
+	cache->times[idx] = cache->time++;
+	//XSEGLOG("cache->times[%llu] = %llu", idx, cache->times[idx]);
 }
 
+/*
+ * Proper precautions should be taken, when getting a cache entry.
+ * There is a race with removal and finding lru cache entry.
+ */
 static void xcache_entry_get(struct xcache *cache, xqindex idx)
 {
 	struct xcache_entry *ce = &cache->nodes[idx];
-	ce->ref++;
+	__sync_add_and_fetch(&ce->ref, 1);
 	__update_access_time(cache, idx);
 }
 
+/* put should be called with no locking */
 static void xcache_entry_put(struct xcache *cache, xqindex idx)
 {
 	struct xcache_entry *ce = &cache->nodes[idx];
-	ce->ref--;
-	if (!ce->ref){
+	unsigned long ref = __sync_sub_and_fetch(&ce->ref, 1);
+	if (!ref){
 		if (cache->ops.on_put)
 			cache->ops.on_put(cache->priv, ce->priv);
 		free_cache_entry(cache, idx);
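The refcounting above moves from plain ref++/ref-- to GCC's __sync full-barrier atomic builtins, so get and put are safe on the count itself without holding the cache lock; note the opposite move in __update_access_time, whose global counter goes back to a plain, unsynchronized increment. A minimal self-contained sketch of the atomic get/put pattern, with a hypothetical release() standing in for the on_put/free_cache_entry cleanup:

#include <stdio.h>

struct entry {
	unsigned long ref;
};

/* Hypothetical cleanup hook, standing in for on_put/free_cache_entry. */
static void release(struct entry *e)
{
	(void)e;
	printf("last reference dropped, freeing entry\n");
}

static void entry_get(struct entry *e)
{
	/* Atomically increment and return the new value (full barrier). */
	__sync_add_and_fetch(&e->ref, 1);
}

static void entry_put(struct entry *e)
{
	/* Atomically decrement; exactly one thread observes the count
	 * reaching zero, and only that thread runs the cleanup path. */
	if (__sync_sub_and_fetch(&e->ref, 1) == 0)
		release(e);
}

int main(void)
{
	struct entry e = { .ref = 1 };	/* creator holds one reference */
	entry_get(&e);			/* a second user takes a reference */
	entry_put(&e);
	entry_put(&e);			/* prints the free message exactly once */
	return 0;
}

The __sync_* builtins are GCC/Clang extensions rather than standard C, matching the compilers this code targets.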
@@ -110,13 +116,13 @@
 	uint64_t min = -2;
 	xqindex i, lru = Noneidx;
 	for (i = 0; i < cache->nr_nodes; i++) {
-		XSEGLOG("cache->times[%llu] = %llu", i, cache->times[i]);
+		//XSEGLOG("cache->times[%llu] = %llu", i, cache->times[i]);
 		if (min > cache->times[i]){
 			min = cache->times[i];
 			lru = i;
 		}
 	}
-	XSEGLOG("Found lru cache->times[%llu] = %llu", lru, cache->times[lru]);
+	//XSEGLOG("Found lru cache->times[%llu] = %llu", lru, cache->times[lru]);
 	return lru;
 }
 
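The eviction scan depends on two sentinels: __xcache_remove stamps freed slots with (uint64_t)-1 (the maximum value), and min starts at (uint64_t)-2, so a freed slot can never win the strict min > comparison. If every slot is freed, lru stays Noneidx, which the "//assert lru != Noneidx" note later in this diff guards against. A self-contained illustration with made-up timestamps:

#include <stdint.h>
#include <stdio.h>

#define NR_NODES 4

int main(void)
{
	/* Slot 1 was freed earlier and stamped (uint64_t)-1. */
	uint64_t times[NR_NODES] = { 7, (uint64_t)-1, 3, 5 };
	uint64_t min = -2;	/* UINT64_MAX - 1: freed slots never beat it */
	int i, lru = -1;

	for (i = 0; i < NR_NODES; i++) {
		if (min > times[i]) {
			min = times[i];
			lru = i;
		}
	}
	/* Prints "lru slot: 2 (time 3)": the freed slot was skipped. */
	printf("lru slot: %d (time %llu)\n", lru, (unsigned long long)min);
	return 0;
}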
@@ -130,6 +136,7 @@
 	return xqi;
 }
 
+/* after a succesfull call, the handler must be put */
 static int __xcache_remove(struct xcache *cache, xcache_handler h)
 {
 	int r;
@@ -138,26 +145,36 @@
 
 	r = xhash_delete(cache->entries, (xhashidx)ce->name);
 	if (r < 0){
-		XSEGLOG("Couldn't delete cache entry(h: %llu, cache->nodes[h].priv: %p",
-				h, cache->nodes[idx].priv);
+		//XSEGLOG("Couldn't delete cache entry(h: %llu, cache->nodes[h].priv: %p",
+		//		h, cache->nodes[idx].priv);
 		return r;
 	}
-	//put here, so the last one holding a ref to the entry will
-	//free the node
 	cache->times[idx] = -1;
-	XSEGLOG("cache->times[%llu] = %llu", idx, cache->times[idx]);
-	xcache_entry_put(cache, idx);
+	//XSEGLOG("cache->times[%llu] = %llu", idx, cache->times[idx]);
 	return 0;
 }
 
-static int __xcache_insert(struct xcache *cache, xcache_handler h)
+/*
+ * Inserts a new entry to cache or returns a diffrent entry with the same key
+ * (name).
+ * In either case, ref count on the returned entry, gets increased
+ *
+ */
+static xcache_handler __xcache_insert(struct xcache *cache, xcache_handler h)
 {
 	int r;
 	struct xcache_entry *ce;
-	xqindex lru, idx;
+	xqindex lru, idx, xqi;
 	idx = (xqindex)h;
 	ce = &cache->nodes[idx];
-	//TODO lookup first to ensure we don't overwrite entries
+
+	/* lookup first to ensure we don't overwrite entries */
+	if (xhash_lookup(cache->entries, (xhashidx)ce->name, &xqi) >= 0){
+		xcache_entry_get(cache, xqi);
+		return xqi;
+	}
+
+	/* insert new entry to cache */
 retry:
 	r = xhash_insert(cache->entries, (xhashidx)ce->name, idx);
 	if (r == -XHASH_ENOSPC){
@@ -165,25 +182,26 @@
 		//assert lru != Noneidx
 		//validate lru
 		if (__xcache_remove(cache, lru) < 0)
-			return -1;
+			return NoEntry;
+		xcache_entry_put(cache, lru);
 		goto retry;
 	}
 	if (r == -XHASH_ERESIZE){
+		XSEGLOG("Rebuilding internal hash table");
 		xhash_t *new = xhash_resize(cache->entries,
 				cache->entries->size_shift,
 				cache->entries->limit, NULL);
 		if (!new)
-			return -1;
+			return NoEntry;
 		cache->entries = new;
 		goto retry;
 	}
+
 	if (r >= 0){
-		//get cache entry here.
-		//will be put by xcache_remove
 		xcache_entry_get(cache, idx);
 	}
 
-	return r;
+	return ((r < 0) ? NoEntry : h);
 }
 
 xcache_handler xcache_lookup(struct xcache *cache, char *name)
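__xcache_insert now returns a handler instead of an int: it looks the key up first and, when an entry with the same name already exists, returns that entry with its refcount raised rather than shadowing it; on -XHASH_ENOSPC it evicts the LRU entry (a remove followed by an explicit put, since __xcache_remove no longer puts) and retries; on -XHASH_ERESIZE it grows the table and retries. A toy model of that get-or-insert-with-retry control flow (all names and the two-slot table are made up):

#include <stdio.h>
#include <string.h>

#define SLOTS 2
#define NO_ENTRY (-1)

static const char *slots[SLOTS];	/* toy stand-in for the xhash */

static int lookup(const char *name)
{
	int i;
	for (i = 0; i < SLOTS; i++)
		if (slots[i] && !strcmp(slots[i], name))
			return i;
	return NO_ENTRY;
}

static int insert(const char *name)
{
	int i;

	/* Lookup first, so a same-named entry is returned, not shadowed. */
	i = lookup(name);
	if (i != NO_ENTRY)
		return i;
retry:
	for (i = 0; i < SLOTS; i++)
		if (!slots[i]) {
			slots[i] = name;
			return i;
		}
	slots[0] = NULL;	/* table full: evict a victim (here slot 0) */
	goto retry;		/* and retry the insertion, as above */
}

int main(void)
{
	printf("%d\n", insert("a"));	/* 0: inserted */
	printf("%d\n", insert("b"));	/* 1: inserted */
	printf("%d\n", insert("a"));	/* 0: existing entry returned */
	printf("%d\n", insert("c"));	/* 0: evicted, then inserted */
	return 0;
}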
@@ -219,7 +237,7 @@
 	 * It must be able to contain at least xcache_size elements, before
 	 * returns an -EXHASH_RESIZE.
 	 * Thus it must be at least 3/2 * xcache_size (OK, the minimum power of
-	 * two that meats this requirement to be exact).
+	 * two that meets this requirement to be exact).
 	 *
 	 * By choosing a xhash size 8 times the minimum required, we drastically
 	 * decrease the number or xhash rebuilts required by xhash for
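The sizing rule in this comment can be made concrete: take the smallest power of two that is at least 3/2 * xcache_size, then scale it by 8 so rebuilds stay rare. A hedged sketch of the arithmetic (the function name is invented; the real sizing happens when the cache is initialized):

#include <stdio.h>

static unsigned long xhash_size_for(unsigned long xcache_size)
{
	unsigned long min = (3 * xcache_size + 1) / 2;	/* ceil(3/2 * n) */
	unsigned long size = 1;

	while (size < min)	/* smallest power of two >= min */
		size <<= 1;
	return size * 8;	/* 8x headroom against rebuilds */
}

int main(void)
{
	/* For a 100-entry cache: ceil(3/2 * 100) = 150 -> 256 -> 2048 slots. */
	printf("%lu\n", xhash_size_for(100));
	return 0;
}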
@@ -269,13 +287,13 @@
 	return 0;
 }
 
-int xcache_insert(struct xcache *cache, xcache_handler h)
+xcache_handler xcache_insert(struct xcache *cache, xcache_handler h)
 {
-	int r;
+	xcache_handler ret;
 	xlock_acquire(&cache->lock, 1);
-	r = __xcache_insert(cache, h);
+	ret = __xcache_insert(cache, h);
 	xlock_release(&cache->lock);
-	return r;
+	return ret;
 }
 
 int xcache_remove(struct xcache *cache, xcache_handler h)
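Callers of the public wrapper must now treat the return value as a handler rather than a status code: compare against NoEntry for failure, and be prepared to receive a different handler than the one passed in when the key already existed. A hedged usage sketch (the header path is assumed, and what a caller does with its own unused handler is not shown in this revision):

#include <xtypes/xcache.h>	/* assumed header location */

int cache_entry(struct xcache *cache, xcache_handler h)
{
	xcache_handler ret = xcache_insert(cache, h);

	if (ret == NoEntry)
		return -1;	/* neither inserted nor found */
	if (ret != h) {
		/* An entry with the same name already existed; the cache
		 * returned it with its refcount raised. Disposing of the
		 * unused handler h is left to the caller here. */
	}
	/* ... use ret, then xcache_put(cache, ret) when done ... */
	return 0;
}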
@@ -284,6 +302,8 @@
 	xlock_acquire(&cache->lock, 1);
 	r = __xcache_remove(cache, h);
 	xlock_release(&cache->lock);
+	if (r >= 0)
+		xcache_entry_put(cache, h);
 	return r;
 }
 
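xcache_remove now performs the put that __xcache_remove used to do itself, and only after releasing the cache lock. A plausible motivation, not stated in the commit, is that dropping the last reference runs the client's on_put callback, which must not execute under the lock if it may re-enter the cache. A self-contained pthread sketch of that rule (build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical client callback that re-enters the cache: it would
 * self-deadlock on a non-recursive lock if the final put still ran
 * with the lock held. */
static void on_put(void)
{
	pthread_mutex_lock(&lock);
	printf("callback ran safely outside the removal critical section\n");
	pthread_mutex_unlock(&lock);
}

static void remove_entry(void)
{
	pthread_mutex_lock(&lock);
	/* ... unhash the entry while the lock is held ... */
	pthread_mutex_unlock(&lock);
	on_put();	/* final put, and thus the callback, runs unlocked */
}

int main(void)
{
	remove_entry();
	return 0;
}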
@@ -295,22 +315,39 @@
 	if (xhash_lookup(cache->entries, (xhashidx)name, &xqi) >= 0){
 		//validate xqi
 		r =__xcache_remove(cache, xqi);
+		xlock_release(&cache->lock);
+		if (r >= 0)
+			xcache_entry_put(cache, xqi);
+	} else {
+		xlock_release(&cache->lock);
 	}
-	xlock_release(&cache->lock);
 	return r;
 }
 
 void xcache_put(struct xcache *cache, xcache_handler h)
 {
 	xqindex idx = (xqindex)h;
-	xlock_acquire(&cache->lock, 1);
+	//xlock_acquire(&cache->lock, 1);
 	xcache_entry_put(cache, h);
-	xlock_release(&cache->lock);
+	//xlock_release(&cache->lock);
 }
 
+/*
+ * Put all cache entries.
+ * Does not free cache resources.
+ * xcache_free should be implemented for this purpose, to be called by the
+ * client when all entries are put.
+ */
 void xcache_close(struct xcache *cache)
 {
-	return;
+	unsigned long i;
+	xlock_acquire(&cache->lock, 1);
+	for (i = 0; i < cache->nr_nodes; i++) {
+		if (cache->times[i] == -1)
+			continue;
+		xcache_entry_put(cache, (xqindex)i);
+	}
+	xlock_release(&cache->lock);
 }
 
 #ifdef __KERNEL__
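xcache_close now walks every slot and drops the cache's own reference on each live entry, skipping slots stamped (uint64_t)-1 by __xcache_remove; per the new comment, freeing the cache's resources is deliberately left to a yet-unwritten xcache_free. A toy illustration of the skip (timestamps made up):

#include <stdint.h>
#include <stdio.h>

#define NR_NODES 3

int main(void)
{
	/* Slot 1 was removed earlier, so close must not put it again. */
	uint64_t times[NR_NODES] = { 5, (uint64_t)-1, 9 };
	unsigned long i;

	for (i = 0; i < NR_NODES; i++) {
		if (times[i] == (uint64_t)-1)
			continue;	/* free slot: no reference to drop */
		printf("putting cache's reference on slot %lu\n", i);
	}
	return 0;
}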
