Revision 5be18103
File: b/xseg/peers/user/mt-mapperd.c
(old line | new line | content)
433 | 433 |
r = xhash_insert(mapper->hashmaps, (xhashidx) map->volume, (xhashidx) map); |
434 | 434 |
while (r == -XHASH_ERESIZE) { |
435 | 435 |
xhashidx shift = xhash_grow_size_shift(mapper->hashmaps); |
436 |
xhash_t *new_hashmap = xhash_resize(mapper->hashmaps, shift, NULL); |
|
436 |
xhash_t *new_hashmap = xhash_resize(mapper->hashmaps, shift, 0, NULL);
|
|
437 | 437 |
if (!new_hashmap){ |
438 | 438 |
XSEGLOG2(&lc, E, "Cannot grow mapper->hashmaps to sizeshift %llu", |
439 | 439 |
(unsigned long long) shift); |
... | ... | |
455 | 455 |
r = xhash_delete(mapper->hashmaps, (xhashidx) map->volume); |
456 | 456 |
while (r == -XHASH_ERESIZE) { |
457 | 457 |
xhashidx shift = xhash_shrink_size_shift(mapper->hashmaps); |
458 |
xhash_t *new_hashmap = xhash_resize(mapper->hashmaps, shift, NULL); |
|
458 |
xhash_t *new_hashmap = xhash_resize(mapper->hashmaps, shift, 0, NULL);
|
|
459 | 459 |
if (!new_hashmap){ |
460 | 460 |
XSEGLOG2(&lc, E, "Cannot shrink mapper->hashmaps to sizeshift %llu", |
461 | 461 |
(unsigned long long) shift); |
... | ... | |
590 | 590 |
int r = xhash_insert(map->objects, mn->objectidx, (xhashidx) mn); |
591 | 591 |
if (r == -XHASH_ERESIZE) { |
592 | 592 |
unsigned long shift = xhash_grow_size_shift(map->objects); |
593 |
map->objects = xhash_resize(map->objects, shift, NULL); |
|
593 |
map->objects = xhash_resize(map->objects, shift, 0, NULL);
|
|
594 | 594 |
if (!map->objects) |
595 | 595 |
return -1; |
596 | 596 |
r = xhash_insert(map->objects, mn->objectidx, (xhashidx) mn); |
... | ... | |
1283 | 1283 |
r = xhash_insert(mio->copyups_nodes, (xhashidx) req, (xhashidx) mn); |
1284 | 1284 |
if (r == -XHASH_ERESIZE) { |
1285 | 1285 |
xhashidx shift = xhash_grow_size_shift(mio->copyups_nodes); |
1286 |
xhash_t *new_hashmap = xhash_resize(mio->copyups_nodes, shift, NULL); |
|
1286 |
xhash_t *new_hashmap = xhash_resize(mio->copyups_nodes, shift, 0, NULL);
|
|
1287 | 1287 |
if (!new_hashmap) |
1288 | 1288 |
goto out; |
1289 | 1289 |
mio->copyups_nodes = new_hashmap; |
... | ... | |
1299 | 1299 |
r = xhash_delete(mio->copyups_nodes, (xhashidx) req); |
1300 | 1300 |
if (r == -XHASH_ERESIZE) { |
1301 | 1301 |
xhashidx shift = xhash_shrink_size_shift(mio->copyups_nodes); |
1302 |
xhash_t *new_hashmap = xhash_resize(mio->copyups_nodes, shift, NULL); |
|
1302 |
xhash_t *new_hashmap = xhash_resize(mio->copyups_nodes, shift, 0, NULL);
|
|
1303 | 1303 |
if (!new_hashmap) |
1304 | 1304 |
goto out; |
1305 | 1305 |
mio->copyups_nodes = new_hashmap; |
... | ... | |
1679 | 1679 |
m->version = 0; /* version 0 should be pithos maps */ |
1680 | 1680 |
} |
1681 | 1681 |
m->flags = 0; |
1682 |
m->objects = xhash_new(3, INTEGER); |
|
1682 |
m->objects = xhash_new(3, 0, INTEGER);
|
|
1683 | 1683 |
if (!m->objects){ |
1684 | 1684 |
XSEGLOG2(&lc, E, "Cannot allocate object hashmap for map %s", |
1685 | 1685 |
m->volume); |
... | ... | |
2841 | 2841 |
struct mapperd *mapperd = malloc(sizeof(struct mapperd)); |
2842 | 2842 |
peer->priv = mapperd; |
2843 | 2843 |
mapper = mapperd; |
2844 |
mapper->hashmaps = xhash_new(3, STRING); |
|
2844 |
mapper->hashmaps = xhash_new(3, 0, STRING);
|
|
2845 | 2845 |
|
2846 | 2846 |
for (i = 0; i < peer->nr_ops; i++) { |
2847 | 2847 |
struct mapper_io *mio = malloc(sizeof(struct mapper_io)); |
2848 |
mio->copyups_nodes = xhash_new(3, INTEGER); |
|
2848 |
mio->copyups_nodes = xhash_new(3, 0, INTEGER);
|
|
2849 | 2849 |
mio->copyups = 0; |
2850 | 2850 |
mio->err = 0; |
2851 | 2851 |
mio->active = 0; |
File: b/xseg/peers/user/mt-vlmcd.c
(old line | new line | content)
181 | 181 |
r = xhash_insert(vlmc->volumes, (xhashidx) vi->name, (xhashidx) vi); |
182 | 182 |
while (r == -XHASH_ERESIZE) { |
183 | 183 |
xhashidx shift = xhash_grow_size_shift(vlmc->volumes); |
184 |
xhash_t *new_hashmap = xhash_resize(vlmc->volumes, shift, NULL); |
|
184 |
xhash_t *new_hashmap = xhash_resize(vlmc->volumes, shift, 0, NULL);
|
|
185 | 185 |
if (!new_hashmap){ |
186 | 186 |
XSEGLOG2(&lc, E, "Cannot grow vlmc->volumes to sizeshift %llu", |
187 | 187 |
(unsigned long long) shift); |
... | ... | |
206 | 206 |
r = xhash_delete(vlmc->volumes, (xhashidx) vi->name); |
207 | 207 |
while (r == -XHASH_ERESIZE) { |
208 | 208 |
xhashidx shift = xhash_shrink_size_shift(vlmc->volumes); |
209 |
xhash_t *new_hashmap = xhash_resize(vlmc->volumes, shift, NULL); |
|
209 |
xhash_t *new_hashmap = xhash_resize(vlmc->volumes, shift, 0, NULL);
|
|
210 | 210 |
if (!new_hashmap){ |
211 | 211 |
XSEGLOG2(&lc, E, "Cannot shrink vlmc->volumes to sizeshift %llu", |
212 | 212 |
(unsigned long long) shift); |
... | ... | |
800 | 800 |
} |
801 | 801 |
peer->priv = (void *) vlmc; |
802 | 802 |
|
803 |
vlmc->volumes = xhash_new(3, STRING); |
|
803 |
vlmc->volumes = xhash_new(3, 0, STRING);
|
|
804 | 804 |
if (!vlmc->volumes){ |
805 | 805 |
XSEGLOG2(&lc, E, "Cannot alloc vlmc"); |
806 | 806 |
return -1; |
File: b/xseg/xseg/xseg.c
(old line | new line | content)
778 | 778 |
priv->segment_type = *segtype; |
779 | 779 |
priv->peer_type = *peertype; |
780 | 780 |
priv->wakeup = wakeup; |
781 |
priv->req_data = xhash_new(3, INTEGER); //FIXME should be relative to XSEG_DEF_REQS |
|
781 |
priv->req_data = xhash_new(3, 0, INTEGER); //FIXME should be relative to XSEG_DEF_REQS
|
|
782 | 782 |
if (!priv->req_data) |
783 | 783 |
goto err_priv; |
784 | 784 |
xlock_release(&priv->reqdatalock); |
... | ... | |
1546 | 1546 |
req_data = xseg->priv->req_data; |
1547 | 1547 |
r = xhash_insert(req_data, (xhashidx) xreq, (xhashidx) data); |
1548 | 1548 |
if (r == -XHASH_ERESIZE) { |
1549 |
req_data = xhash_resize(req_data, xhash_grow_size_shift(req_data), NULL); |
|
1549 |
req_data = xhash_resize(req_data, xhash_grow_size_shift(req_data), 0, NULL);
|
|
1550 | 1550 |
if (req_data) { |
1551 | 1551 |
xseg->priv->req_data = req_data; |
1552 | 1552 |
r = xhash_insert(req_data, (xhashidx) xreq, (xhashidx) data); |
... | ... | |
1573 | 1573 |
if (r >= 0) { |
1574 | 1574 |
r = xhash_delete(req_data, (xhashidx) xreq); |
1575 | 1575 |
if (r == -XHASH_ERESIZE) { |
1576 |
req_data = xhash_resize(req_data, xhash_shrink_size_shift(req_data), NULL); |
|
1576 |
req_data = xhash_resize(req_data, xhash_shrink_size_shift(req_data), 0, NULL);
|
|
1577 | 1577 |
if (req_data){ |
1578 | 1578 |
xseg->priv->req_data = req_data; |
1579 | 1579 |
r = xhash_delete(req_data, (xhashidx) xreq); |
File: b/xseg/xtypes/xhash.c
(old line | new line | content)
222 | 222 |
*/ |
223 | 223 |
|
224 | 224 |
void |
225 |
xhash_init__(xhash_t *xhash, xhashidx size_shift, xhashidx minsize_shift,
|
|
226 |
enum xhash_type type, bool vals) |
|
225 |
xhash_init__(xhash_t *xhash, xhashidx size_shift, xhashidx minsize_shift, |
|
226 |
xhashidx limit, enum xhash_type type, bool vals)
|
|
227 | 227 |
{ |
228 | 228 |
xhashidx nr_items = 1UL << size_shift; |
229 | 229 |
xhashidx *kvs = (xhashidx *) ((char *) xhash + sizeof(struct xhash)); |
... | ... | |
231 | 231 |
|
232 | 232 |
XPTRSET(&xhash->kvs, kvs); |
233 | 233 |
|
234 |
|
|
234 |
|
|
235 | 235 |
if (!vals) { |
236 | 236 |
for (i=0; i < nr_items; i++) |
237 | 237 |
kvs[i] = UNUSED; |
... | ... | |
250 | 250 |
xhash->dummies = xhash->used = 0; |
251 | 251 |
xhash->size_shift = size_shift; |
252 | 252 |
xhash->minsize_shift = minsize_shift; |
253 |
xhash->limit = limit; |
|
253 | 254 |
xhash->type = type; |
254 | 255 |
|
255 | 256 |
ZEROSTAT(xhash->inserts); |
... | ... | |
269 | 270 |
|
270 | 271 |
|
271 | 272 |
xhash_t * |
272 |
xhash_new__(xhashidx size_shift, xhashidx minsize_shift, |
|
273 |
xhash_new__(xhashidx size_shift, xhashidx minsize_shift, xhashidx limit,
|
|
273 | 274 |
enum xhash_type type, bool vals) |
274 | 275 |
{ |
275 | 276 |
struct xhash *xhash; |
... | ... | |
279 | 280 |
return NULL; |
280 | 281 |
} |
281 | 282 |
|
282 |
xhash_init__(xhash, size_shift, minsize_shift, type, vals); |
|
283 |
xhash_init__(xhash, size_shift, minsize_shift, limit, type, vals);
|
|
283 | 284 |
|
284 | 285 |
return xhash; |
285 | 286 |
} |
286 | 287 |
|
287 | 288 |
|
288 | 289 |
xhash_t * |
289 |
xhash_resize__(struct xhash *xhash, xhashidx new_size_shift, bool vals) |
|
290 |
xhash_resize__(struct xhash *xhash, xhashidx new_size_shift, xhashidx new_limit, |
|
291 |
bool vals) |
|
290 | 292 |
{ |
291 |
return xhash_new__(new_size_shift, xhash->minsize_shift, xhash->type, vals); |
|
293 |
return xhash_new__(new_size_shift, xhash->minsize_shift, new_limit, |
|
294 |
xhash->type, vals); |
|
292 | 295 |
} |
293 | 296 |
|
294 | 297 |
int |
... | ... | |
382 | 385 |
} |
383 | 386 |
|
384 | 387 |
xhash_t * |
385 |
xhash_new(xhashidx minsize_shift, enum xhash_type type) |
|
388 |
xhash_new(xhashidx minsize_shift, xhashidx limit, enum xhash_type type)
|
|
386 | 389 |
{ |
387 |
return xhash_new__(minsize_shift, minsize_shift, type, true); |
|
390 |
return xhash_new__(minsize_shift, minsize_shift, limit, type, true);
|
|
388 | 391 |
} |
389 | 392 |
|
390 | 393 |
void xhash_free(struct xhash *xhash) |
... | ... | |
392 | 395 |
xtypes_free(xhash); |
393 | 396 |
} |
394 | 397 |
|
395 |
void xhash_init(struct xhash *xhash, xhashidx minsize_shift, enum xhash_type type) |
|
398 |
void xhash_init(struct xhash *xhash, xhashidx minsize_shift, xhashidx limit, |
|
399 |
enum xhash_type type) |
|
396 | 400 |
{ |
397 |
xhash_init__(xhash, minsize_shift, minsize_shift, type, true); |
|
401 |
xhash_init__(xhash, minsize_shift, minsize_shift, limit, type, true);
|
|
398 | 402 |
} |
399 | 403 |
|
400 | 404 |
/* |
... | ... | |
492 | 496 |
|
493 | 497 |
int xhash_insert(struct xhash *xhash, xhashidx key, xhashidx val) |
494 | 498 |
{ |
499 |
if (xhash->limit && xhash->used >= xhash->limit) |
|
500 |
return -XHASH_ENOSPC; |
|
495 | 501 |
if (grow_check(xhash)) |
496 | 502 |
return -XHASH_ERESIZE; |
497 | 503 |
xhash_insert__(xhash, key, val); |
... | ... | |
517 | 523 |
} |
518 | 524 |
|
519 | 525 |
xhash_t * |
520 |
xhash_resize(xhash_t *xhash, xhashidx new_size_shift, xhash_t *new) |
|
526 |
xhash_resize(xhash_t *xhash, xhashidx new_size_shift, xhashidx new_limit, |
|
527 |
xhash_t *new) |
|
521 | 528 |
{ |
522 | 529 |
//XSEGLOG("Resizing xhash from %llu to %llu", xhash->size_shift, new_size_shift); |
523 | 530 |
xhashidx i; |
524 | 531 |
int f = !!new; |
525 | 532 |
if (!f) |
526 |
new = xhash_new__(new_size_shift, xhash->minsize_shift, xhash->type, true); |
|
533 |
new = xhash_new__(new_size_shift, xhash->minsize_shift, new_limit, |
|
534 |
xhash->type, true); |
|
527 | 535 |
else |
528 |
xhash_init__(new, new_size_shift, xhash->minsize_shift, xhash->type, true); |
|
536 |
xhash_init__(new, new_size_shift, xhash->minsize_shift, new_limit, |
|
537 |
xhash->type, true); |
|
529 | 538 |
|
530 | 539 |
if (!new) |
531 | 540 |
return NULL; |
File: b/xseg/xtypes/xhash.h
(old line | new line | content)
53 | 53 |
|
54 | 54 |
#define XHASH_ERESIZE 1 |
55 | 55 |
#define XHASH_EEXIST 2 |
56 |
#define XHASH_ENOSPC 3 |
|
56 | 57 |
|
57 | 58 |
enum xhash_type { |
58 | 59 |
INTEGER = 0, /* signed/unsigned integers, pointers, etc */ |
... | ... | |
67 | 68 |
xhashidx used; |
68 | 69 |
xhashidx dummies; |
69 | 70 |
xhashidx defval; |
71 |
xhashidx limit; |
|
70 | 72 |
enum xhash_type type; |
71 | 73 |
#ifdef PHASH_STATS |
72 | 74 |
xhashidx inserts; |
... | ... | |
131 | 133 |
xhashidx xhash_shrink_size_shift(xhash_t *xhash); |
132 | 134 |
ssize_t xhash_get_alloc_size(xhashidx size_shift); |
133 | 135 |
|
134 |
xhash_t *xhash_new(xhashidx minsize_shift, enum xhash_type type); |
|
136 |
xhash_t *xhash_new(xhashidx minsize_shift, xhashidx limit, enum xhash_type type);
|
|
135 | 137 |
void xhash_free(xhash_t *xhash); // pairs with _new() |
136 |
void xhash_init(struct xhash *xhash, xhashidx minsize_shift, enum xhash_type type); |
|
138 |
void xhash_init(struct xhash *xhash, xhashidx minsize_shift, xhashidx limit, |
|
139 |
enum xhash_type type); |
|
137 | 140 |
|
138 |
xhash_t * xhash_resize(xhash_t *xhash, xhashidx new_size_shift, xhash_t *newxhash); |
|
141 |
xhash_t * xhash_resize(xhash_t *xhash, xhashidx new_size_shift, |
|
142 |
xhashidx newlimit, xhash_t *newxhash); |
|
139 | 143 |
int xhash_insert(xhash_t *xhash, xhashidx key, xhashidx val); |
140 | 144 |
int xhash_update(xhash_t *xhash, xhashidx key, xhashidx val); |
141 | 145 |
int xhash_freql_update(xhash_t *xhash, xhashidx key, xhashidx val); |
File: b/xseg/xtypes/xhash_test.c
(old line | new line | content)
48 | 48 |
perror("malloc"); |
49 | 49 |
exit(1); |
50 | 50 |
} |
51 |
xhash_resize(h, sizeshift, new); |
|
51 |
xhash_resize(h, sizeshift, 0, new);
|
|
52 | 52 |
free(h); |
53 | 53 |
return new; |
54 | 54 |
} |
... | ... | |
100 | 100 |
perror("malloc"); |
101 | 101 |
exit(1); |
102 | 102 |
} |
103 |
xhash_init(h, 2, STRING); |
|
103 |
xhash_init(h, 2, 0, STRING);
|
|
104 | 104 |
for (i = 10; i < loops; i++) { |
105 | 105 |
int ret; |
106 | 106 |
xhashidx r; |
... | ... | |
164 | 164 |
perror("malloc"); |
165 | 165 |
exit(1); |
166 | 166 |
} |
167 |
xhash_init(h, 3, STRING); |
|
167 |
xhash_init(h, 3, 0, STRING);
|
|
168 | 168 |
for (i = 0; i < 4; i++) { |
169 | 169 |
int ret; |
170 | 170 |
xhashidx r; |
... | ... | |
210 | 210 |
return 0; |
211 | 211 |
} |
212 | 212 |
|
213 |
//TODO add test for limit |
|
213 | 214 |
int main(int argc, char **argv) { |
214 | 215 |
xhashidx loops, i, v; |
215 | 216 |
struct xhash *h; |
... | ... | |
226 | 227 |
perror("malloc"); |
227 | 228 |
exit(1); |
228 | 229 |
} |
229 |
xhash_init(h, 2, INTEGER); |
|
230 |
xhash_init(h, 2, 0, INTEGER);
|
|
230 | 231 |
for (i = 10; i < loops; i++) { |
231 | 232 |
int ret; |
232 | 233 |
xhashidx r; |
File: b/xseg/xtypes/xobj.c
(old line | new line | content)
60 | 60 |
*/ |
61 | 61 |
//bytes = xheap_get_chunk_size(xhash); |
62 | 62 |
|
63 |
xhash_init(xhash, 3, INTEGER); |
|
63 |
xhash_init(xhash, 3, 0, INTEGER);
|
|
64 | 64 |
obj_h->allocated = XPTR_MAKE(xhash, container); |
65 | 65 |
obj_h->list = 0; |
66 | 66 |
obj_h->flags = 0; |
... | ... | |
118 | 118 |
new = xheap_allocate(heap, size); |
119 | 119 |
if (!new) |
120 | 120 |
goto err; |
121 |
xhash_resize(allocated, sizeshift, new); |
|
121 |
xhash_resize(allocated, sizeshift, 0, new);
|
|
122 | 122 |
xheap_free(allocated); |
123 | 123 |
allocated = new; |
124 | 124 |
obj_h->allocated = XPTR_MAKE(allocated, container); |
Also available in: Unified diff