2 #include <xseg/domain.h>
/* Fallback NULL definition (freestanding / kernel builds). */
6 #define NULL ((void *)0)
/* Capacity limits for the process-local registries below. */
9 #define XSEG_NR_TYPES 16
10 #define XSEG_NR_PEER_TYPES 64
11 #define XSEG_MIN_PAGE_SIZE 4096
/* Process-local registry of segment type drivers. */
13 static struct xseg_type *__types[XSEG_NR_TYPES];
14 static unsigned int __nr_types;
/* Process-local registry of peer type drivers. */
15 static struct xseg_peer *__peer_types[XSEG_NR_PEER_TYPES];
16 static unsigned int __nr_peer_types;
18 static void __lock_segment(struct xseg *xseg)
20 volatile uint64_t *flags;
21 flags = &xseg->shared->flags;
22 while (__sync_fetch_and_or(flags, XSEG_F_LOCK));
25 static void __unlock_segment(struct xseg *xseg)
27 volatile uint64_t *flags;
28 flags = &xseg->shared->flags;
29 __sync_fetch_and_and(flags, ~XSEG_F_LOCK);
/* Look up a registered segment type by name (bounded compare up to
 * XSEG_TNAMESIZE). On return *index holds the matching slot, or
 * __nr_types when nothing matched. */
32 static struct xseg_type *__find_type(const char *name, long *index)
35 for (i = 0; (*index = i) < __nr_types; i++)
36 if (!strncmp(__types[i]->name, name, XSEG_TNAMESIZE))
/* Look up a registered peer type by name; *index receives the matching
 * slot, or __nr_peer_types when no entry matched. */
41 static struct xseg_peer *__find_peer_type(const char *name, int64_t *index)
44 for (i = 0; (*index = i) < __nr_peer_types; i++) {
45 if (!strncmp(__peer_types[i]->name, name, XSEG_TNAMESIZE))
46 return __peer_types[i];
/* Log the name of every peer type registered in this process. */
51 void xseg_report_peer_types(void)
54 XSEGLOG("total %u peer types:\n", __nr_peer_types);
55 for (i = 0; i < __nr_peer_types; i++)
56 XSEGLOG("%ld: '%s'\n", i, __peer_types[i]->name);
/* Return the segment type named 'name'. On a miss, the backing driver
 * is presumably loaded (load step between these lines — verify) and the
 * lookup retried once. */
59 static struct xseg_type *__find_or_load_type(const char *name)
62 struct xseg_type *type = __find_type(name, &i);
67 return __find_type(name, &i);
/* Return the peer type named 'name'. On a miss, the backing driver is
 * presumably loaded (load step between these lines — verify) and the
 * lookup retried once. */
70 static struct xseg_peer *__find_or_load_peer_type(const char *name)
73 struct xseg_peer *peer_type = __find_peer_type(name, &i);
78 return __find_peer_type(name, &i);
/* Resolve a peer-type serial (as stored in a port) to its driver,
 * caching the result in priv->peer_types[serial]. Fails on an
 * out-of-range serial, an empty name slot, or a missing driver. */
81 static struct xseg_peer *__get_peer_type(struct xseg *xseg, uint32_t serial)
84 struct xseg_peer *type;
85 struct xseg_private *priv = xseg->priv;
86 char (*shared_peer_types)[XSEG_TNAMESIZE];
88 if (serial >= xseg->max_peer_types) {
89 XSEGLOG("invalid peer type serial %d >= %d\n",
90 serial, xseg->max_peer_types);
/* fast path: already resolved and cached for this process */
94 type = priv->peer_types[serial];
98 /* xseg->shared->peer_types is an append-only array,
99 * therefore this should be safe
100 * without either locking or string copying. */
101 shared_peer_types = XSEG_TAKE_PTR(xseg->shared->peer_types, xseg->segment);
102 name = shared_peer_types[serial];
104 XSEGLOG("nonexistent peer type serial %d\n", serial);
108 type = __find_or_load_peer_type(name);
110 XSEGLOG("could not find driver for peer type %d [%s]\n",
/* cache for subsequent lookups */
113 priv->peer_types[serial] = type;
117 static inline int __validate_port(struct xseg *xseg, uint32_t portno)
119 return portno < xseg->config.nr_ports;
122 static inline int __validate_ptr(struct xseg *xseg, xptr ptr)
124 return ptr < xseg->segment_size;
127 /* type:name:nr_ports:nr_requests:request_size:extra_size:page_shift */
129 #define TOK(s, sp, def) \
/* Hand-rolled parser for an unsigned decimal number in s. */
146 static unsigned long strul(char *s)
150 unsigned char c = *s - '0';
/* Bounded string copy helper; presumably guarantees NUL-termination,
 * unlike strncpy — verify against the full body. */
160 static char *strncopy(char *dest, const char *src, uint32_t n)
164 for (i = 0; i < n; i++) {
/* Parse "type:name:nr_ports:nr_requests:request_size:extra_size:page_shift"
 * into *config; TOK supplies a default for each missing field. The spec
 * string is likely tokenized destructively — verify against TOK. */
175 int xseg_parse_spec(char *segspec, struct xseg_config *config)
177 /* default: "posix:globalxseg:4:512:64:1024:12" */
178 char *s = segspec, *sp = segspec;
/* segment type name, forcibly NUL-terminated */
182 strncpy(config->type, s, XSEG_TNAMESIZE);
183 config->type[XSEG_TNAMESIZE-1] = 0;
186 TOK(s, sp, "globalxseg");
187 strncpy(config->name, s, XSEG_NAMESIZE);
188 config->name[XSEG_NAMESIZE-1] = 0;
/* the remaining fields are unsigned decimal numbers */
192 config->nr_ports = strul(s);
196 config->nr_requests = strul(s);
200 config->request_size = strul(s);
204 config->extra_size = strul(s);
208 config->page_shift = strul(s);
/* Register a segment type driver in the process-local table. Fails when
 * the name already exists or the table is full. */
212 int xseg_register_type(struct xseg_type *type)
216 struct xseg_type *__type;
218 __type = __find_type(type->name, &i);
220 XSEGLOG("type %s already exists\n", type->name);
224 if (__nr_types >= XSEG_NR_TYPES) {
225 XSEGLOG("maximum type registrations reached: %u\n", __nr_types);
/* force NUL-termination of the stored name before publishing */
230 type->name[XSEG_TNAMESIZE-1] = 0;
231 __types[__nr_types] = type;
/* Remove a segment type from the registry. Swap-with-last removal:
 * order is not preserved; __nr_types is presumably decremented before
 * the swap, cf. xseg_unregister_peer(). */
239 int xseg_unregister_type(const char *name)
243 struct xseg_type *__type;
245 __type = __find_type(name, &i);
247 XSEGLOG("segment type '%s' does not exist\n", name);
252 __types[i] = __types[__nr_types];
253 __types[__nr_types] = NULL;
/* Register a peer type driver; its signaling machinery is initialized
 * before the type becomes visible in the table. */
260 int xseg_register_peer(struct xseg_peer *peer_type)
264 struct xseg_peer *type;
266 type = __find_peer_type(peer_type->name, &i);
268 XSEGLOG("peer type '%s' already exists\n", type->name);
272 if (__nr_peer_types >= XSEG_NR_PEER_TYPES) {
273 XSEGLOG("maximum peer type registrations reached: %u",
/* per-driver signal setup must succeed before publication */
279 if (peer_type->peer_ops.signal_init()) {
280 XSEGLOG("peer type '%s': signal initialization failed\n",
286 peer_type->name[XSEG_TNAMESIZE-1] = 0;
287 __peer_types[__nr_peer_types] = peer_type;
288 __nr_peer_types += 1;
/* Remove a peer type from the registry (swap-with-last, order not
 * preserved) and tear down its signaling. */
296 int xseg_unregister_peer(const char *name)
299 struct xseg_peer *driver;
302 driver = __find_peer_type(name, &i);
304 XSEGLOG("peer type '%s' does not exist\n", name);
308 __nr_peer_types -= 1;
309 __peer_types[i] = __peer_types[__nr_peer_types];
310 __peer_types[__nr_peer_types] = NULL;
311 driver->peer_ops.signal_quit();
/* Publish 'driver' in the segment-shared peer-type name table and cache
 * it locally; returns the driver's serial (table slot). An existing
 * entry with the same name is reused, so serials are stable across
 * peers. Caller must hold the segment lock (see xseg_enable_driver). */
318 int64_t __enable_driver(struct xseg *xseg, struct xseg_peer *driver)
321 char (*drivers)[XSEG_TNAMESIZE];
322 uint32_t max_drivers = xseg->max_peer_types;
324 if (xseg->shared->nr_peer_types >= max_drivers) {
325 XSEGLOG("cannot register '%s': driver namespace full\n",
/* scan the shared name table for an existing slot with this name */
330 drivers = XSEG_TAKE_PTR(xseg->shared->peer_types, xseg->segment);
331 for (r = 0; r < max_drivers; r++) {
334 if (!strncmp(drivers[r], driver->name, XSEG_TNAMESIZE))
342 /* assert(xseg->shared->nr_peer_types == r); */
343 xseg->shared->nr_peer_types = r + 1;
344 strncpy(drivers[r], driver->name, XSEG_TNAMESIZE);
345 drivers[r][XSEG_TNAMESIZE-1] = 0;
/* also resolve the serial locally for this process */
348 xseg->priv->peer_types[r] = driver;
/* Locked wrapper: look up a registered peer type by name and publish it
 * in the shared segment via __enable_driver(). */
352 int64_t xseg_enable_driver(struct xseg *xseg, const char *name)
355 struct xseg_peer *driver;
358 driver = __find_peer_type(name, &r);
360 XSEGLOG("driver '%s' not found\n", name);
364 __lock_segment(xseg);
365 r = __enable_driver(xseg, driver);
366 __unlock_segment(xseg);
/* Drop every process-local cache entry pointing at the named driver.
 * The shared name table is append-only and is left untouched. */
372 int xseg_disable_driver(struct xseg *xseg, const char *name)
376 struct xseg_private *priv = xseg->priv;
377 struct xseg_peer *driver;
379 driver = __find_peer_type(name, &i);
381 XSEGLOG("driver '%s' not found\n", name);
385 for (i = 0; i < xseg->max_peer_types; i++)
386 if (priv->peer_types[i] == driver)
387 priv->peer_types[i] = NULL;
394 /* NOTE: calculate_segment_size() and initialize_segment()
395 * must always be exactly in sync!
/* Compute the total byte size of a segment described by *config: one
 * page for struct xseg plus the configured heap, aligned to page_shift.
 * Rejects page_shift < 9 (struct xseg is assumed to fit in 512 bytes). */
398 static uint64_t calculate_segment_size(struct xseg_config *config)
401 uint32_t page_size, page_shift = config->page_shift;
403 /* assert(sizeof(struct xseg) <= (1 << 9)); */
405 if (page_shift < 9) {
406 XSEGLOG("page_shift must be >= %d\n", 9);
410 page_size = 1 << page_shift;
412 /* struct xseg itself */
413 size += page_size + config->heap_size;
414 size = __align(size, page_shift);
/* Build all in-segment structures inside a freshly mapped segment: the
 * heap, the object-handler handler, the request/port/4K/256K/4M object
 * handlers, the shared area, and the shared peer-type name table.
 * Layout must stay exactly in sync with calculate_segment_size(). */
419 static long initialize_segment(struct xseg *xseg, struct xseg_config *cfg)
421 uint32_t page_shift = cfg->page_shift, page_size = 1 << page_shift;
422 struct xseg_shared *shared;
423 char *segment = (char *)xseg;
426 uint64_t bodysize, size = page_size, i;
428 struct xseg_heap *heap;
430 struct xseg_object_handler *obj_h;
433 if (page_size < XSEG_MIN_PAGE_SIZE)
436 xseg->segment_size = size;
437 xseg->segment = segment;
/* carve the heap control block out right after struct xseg */
440 xseg->heap = XSEG_MAKE_PTR(segment + size, segment);
/* NOTE(review): 'sizeof(xseg_heap)' — elsewhere this type is spelled
 * 'struct xseg_heap'; verify a typedef exists or this cannot compile. */
441 size += sizeof(xseg_heap);
442 size = __align(size, page_shift);
444 heap = XSEG_TAKE_PTR(xseg->heap, segment);
/* NOTE(review): 'config->heap_size' — the parameter here is named
 * 'cfg'; verify where 'config' comes from. */
445 heap->size = config->heap_size;
446 heap->start = XSEG_MAKE_PTR(segment+size, segment);
447 heap->cur = heap->start;
449 /* build object_handler handler */
450 mem = xseg_allocate(heap, sizeof(struct xseg_object_handler));
453 xseg->object_handlers = mem;
454 obj_h = XSEG_TAKE_PTR(xseg->object_handlers, segment);
455 r = xseg_init_object_handler(segment, obj_h, MAGIC_OBJH,
456 sizeof(struct xseg_object_handler), xseg->heap);
460 //now that we have object handlers handler, use that to allocate
461 //new object handlers
463 //allocate requests handler
464 mem = xseg_get_obj(obj_h, X_ALLOC);
467 obj_h = XSEG_TAKE_PTR(mem, segment);
468 r = xseg_init_object_handler(segment, obj_h, MAGIC_REQ,
469 sizeof(struct xseg_request), xseg->heap);
472 xseg->requests = mem;
474 //allocate ports handler
475 obj_h = XSEG_TAKE_PTR(xseg->object_handlers, segment);
476 mem = xseg_get_obj(obj_h, X_ALLOC);
479 obj_h = XSEG_TAKE_PTR(mem, segment);
480 r = xseg_init_object_handler(segment, obj_h, MAGIC_PORT,
481 sizeof(struct xseg_port), xseg->heap);
486 //allocate buffers4K handler
487 obj_h = XSEG_TAKE_PTR(xseg->object_handlers, segment);
488 mem = xseg_get_obj(obj_h, X_ALLOC);
491 obj_h = XSEG_TAKE_PTR(mem, segment);
492 r = xseg_init_object_handler(segment, obj_h, MAGIC_4K,
496 xseg->buffers4K = mem;
498 //allocate buffers256K handler
499 obj_h = XSEG_TAKE_PTR(xseg->object_handlers, segment);
500 mem = xseg_get_obj(obj_h, X_ALLOC);
503 obj_h = XSEG_TAKE_PTR(mem, segment);
504 r = xseg_init_object_handler(segment, obj_h, MAGIC_256K,
505 256*1024, xseg->heap);
508 xseg->buffers256K = mem;
510 //allocate buffers4M handler
511 obj_h = XSEG_TAKE_PTR(xseg->object_handlers, segment);
512 mem = xseg_get_obj(obj_h, X_ALLOC);
515 obj_h = XSEG_TAKE_PTR(mem, segment);
516 r = xseg_init_object_handler(segment, obj_h, MAGIC_4M,
517 4096*1024, xseg->heap);
520 xseg->buffers4M = mem;
522 //allocate xseg_shared memory
523 mem = xseg_allocate(heap, sizeof(struct xseg_shared));
526 shared = (struct xseg_shared *) XSEG_TAKE_PTR(mem, segment);
528 shared->nr_peer_types = 0;
/* one page of XSEG_TNAMESIZE-wide slots for peer-type names */
531 mem = xseg_allocate(heap, page_size);
534 shared->peer_types = mem;
535 xseg->max_peer_types = get_alloc_bytes(page_size) / XSEG_TNAMESIZE;
537 memcpy(&xseg->config, cfg, sizeof(struct xseg_config));
539 xseg->counters.req_cnt = 0;
540 xseg->counters.avg_req_lat = 0;
/* Allocate, map and initialize a brand-new shared segment described by
 * *cfg, via the segment type's backend ops. The mapping is dropped
 * again before returning; peers attach later through xseg_join(). */
545 int xseg_create(struct xseg_config *cfg)
547 struct xseg *xseg = NULL;
548 struct xseg_type *type;
549 struct xseg_operations *xops;
553 type = __find_or_load_type(cfg->type);
/* force-terminate before logging; cfg strings may be caller-supplied */
555 cfg->type[XSEG_TNAMESIZE-1] = 0;
556 XSEGLOG("type '%s' does not exist\n", cfg->type);
560 size = calculate_segment_size(cfg);
562 XSEGLOG("invalid config!\n");
567 cfg->name[XSEG_NAMESIZE-1] = 0;
568 r = xops->allocate(cfg->name, size);
570 XSEGLOG("cannot allocate segment!\n");
574 xseg = xops->map(cfg->name, size, NULL);
576 XSEGLOG("cannot map segment!\n");
580 r = initialize_segment(xseg, cfg);
581 xops->unmap(xseg, size);
583 XSEGLOG("cannot initilize segment!\n");
/* failure path: release the backing allocation again */
591 xops->deallocate(cfg->name);
/* Permanently destroy a segment's backing storage via its type ops. */
596 void xseg_destroy(struct xseg *xseg)
598 struct xseg_type *type;
601 type = __find_or_load_type(xseg->config.type);
603 XSEGLOG("no segment type '%s'\n", xseg->config.type);
607 /* should destroy() leave() first? */
608 type->ops.deallocate(xseg->config.name);
/* Despite the name, returns nonzero when ptr lies OUTSIDE
 * [base, base + size) — i.e. the result counts "invalid" pointers, as
 * summed by xseg_validate_pointers().
 * NOTE(review): the '%s' in the format expects a field-name argument
 * before ptr/base; verify the argument list matches. */
613 static int pointer_ok( unsigned long ptr,
618 int ret = !(ptr >= base && ptr < base + size);
620 XSEGLOG("invalid pointer '->%s' [%llx on %llx]!\n",
621 (unsigned long long)ptr,
622 (unsigned long long)base,
/* Check that xseg->field points inside the mapped segment region. */
627 #define POINTER_OK(xseg, field, base) \
628 pointer_ok( (unsigned long)((xseg)->field), \
629 (unsigned long)(base), \
630 (xseg)->segment_size, \
/* Count how many of the core segment pointers fall outside the mapped
 * region; 0 means everything checks out. */
633 static int xseg_validate_pointers(struct xseg *xseg)
636 r += POINTER_OK(xseg, requests, xseg->segment);
637 r += POINTER_OK(xseg, free_requests, xseg->segment);
638 r += POINTER_OK(xseg, ports, xseg->segment);
639 r += POINTER_OK(xseg, buffers, xseg->segment);
640 r += POINTER_OK(xseg, extra, xseg->segment);
641 r += POINTER_OK(xseg, shared, xseg->segment);
/* Attach the calling peer to an existing segment: map one page to learn
 * the segment's size, remap it fully, build the per-process struct xseg
 * with segment-relative pointers translated to this mapping, validate
 * them, and hook up peer signaling. */
645 struct xseg *xseg_join( char *segtypename,
652 struct xseg *xseg, *__xseg;
654 struct xseg_peer *peertype;
655 struct xseg_type *segtype;
656 struct xseg_private *priv;
657 struct xseg_operations *xops;
658 struct xseg_peer_operations *pops;
663 peertype = __find_or_load_peer_type(peertypename);
665 XSEGLOG("Peer type '%s' not found\n", peertypename);
670 segtype = __find_or_load_type(segtypename);
672 XSEGLOG("Segment type '%s' not found\n", segtypename);
679 xops = &segtype->ops;
680 pops = &peertype->peer_ops;
682 xseg = pops->malloc(sizeof(struct xseg));
684 XSEGLOG("Cannot allocate memory");
688 priv = pops->malloc(sizeof(struct xseg_private));
690 XSEGLOG("Cannot allocate memory");
/* bootstrap map: a single page suffices to read segment_size */
694 __xseg = xops->map(segname, XSEG_MIN_PAGE_SIZE, NULL);
696 XSEGLOG("Cannot map segment");
700 size = __xseg->segment_size;
701 /* XSEGLOG("joined segment of size: %lu\n", (unsigned long)size); */
702 xops->unmap(__xseg, XSEG_MIN_PAGE_SIZE);
/* full map, with the local struct xseg passed as a placement hint */
704 __xseg = xops->map(segname, size, xseg);
706 XSEGLOG("Cannot map segment");
710 priv->segment_type = *segtype;
711 priv->peer_type = *peertype;
712 priv->wakeup = wakeup;
713 xseg->max_peer_types = __xseg->max_peer_types;
715 priv->peer_types = pops->malloc(sizeof(void *) * xseg->max_peer_types);
716 if (!priv->peer_types) {
717 XSEGLOG("Cannot allocate memory");
720 memset(priv->peer_types, 0, sizeof(void *) * xseg->max_peer_types);
/* translate segment-relative pointers into this mapping */
723 xseg->config = __xseg->config;
724 xseg->version = __xseg->version;
725 xseg->requests = XSEG_TAKE_PTR(__xseg->requests, __xseg);
726 xseg->free_requests = XSEG_TAKE_PTR(__xseg->free_requests, __xseg);
727 xseg->ports = XSEG_TAKE_PTR(__xseg->ports, __xseg);
728 xseg->buffers = XSEG_TAKE_PTR(__xseg->buffers, __xseg);
729 xseg->extra = XSEG_TAKE_PTR(__xseg->extra, __xseg);
730 xseg->shared = XSEG_TAKE_PTR(__xseg->shared, __xseg);
731 xseg->segment_size = size;
732 xseg->segment = __xseg;
734 r = xseg_validate_pointers(xseg);
736 XSEGLOG("found %d invalid xseg pointers!\n", r);
741 r = xops->signal_join(xseg);
743 XSEGLOG("Cannot attach signaling to segment! (error: %d)\n", r);
/* error unwind: release local allocations and drop the mapping */
751 pops->mfree(priv->peer_types);
753 xops->unmap(__xseg, size);
/* Detach from a segment: unmap it via its type ops (counterpart of
 * xseg_join). */
762 void xseg_leave(struct xseg *xseg)
764 struct xseg_type *type;
767 type = __find_or_load_type(xseg->config.type);
769 XSEGLOG("no segment type '%s'\n", xseg->config.type);
775 type->ops.unmap(xseg->segment, xseg->segment_size);
/* Arm 'portno' for sleeping on signals; delegates to this peer type's
 * prepare_wait hook. Fails on an out-of-range port number. */
778 int xseg_prepare_wait(struct xseg *xseg, uint32_t portno)
780 if (!__validate_port(xseg, portno))
783 return xseg->priv->peer_type.peer_ops.prepare_wait(xseg, portno);
/* Disarm a previously prepared wait on 'portno'; delegates to the peer
 * type's cancel_wait hook. Fails on an out-of-range port number. */
786 int xseg_cancel_wait(struct xseg *xseg, uint32_t portno)
788 if (!__validate_port(xseg, portno))
790 return xseg->priv->peer_type.peer_ops.cancel_wait(xseg, portno);
793 int xseg_wait_signal(struct xseg *xseg, uint32_t usec_timeout)
795 return xseg->priv->peer_type.peer_ops.wait_signal(xseg, usec_timeout);
/* Wake whoever owns 'portno', using the signal op of the peer type
 * recorded in the port (resolved from its stored serial). */
798 int xseg_signal(struct xseg *xseg, uint32_t portno)
800 struct xseg_peer *type;
801 struct xseg_port *port;
802 if (!__validate_port(xseg, portno))
805 port = &xseg->ports[portno];
806 type = __get_peer_type(xseg, port->peer_type);
810 return type->peer_ops.signal(xseg, portno);
/* Move up to nr request slots from the segment-global free pool onto
 * this port's private free queue. */
813 int xseg_alloc_requests(struct xseg *xseg, uint32_t portno, uint32_t nr)
815 struct xseg_port *port;
816 if (!__validate_port(xseg, portno))
819 port = &xseg->ports[portno];
820 return xq_head_to_tail(xseg->free_requests, &port->free_queue, nr, portno);
/* Return up to nr request slots from this port's free queue back to the
 * segment-global free pool (inverse of xseg_alloc_requests). */
823 int xseg_free_requests(struct xseg *xseg, uint32_t portno, int nr)
825 struct xseg_port *port;
826 if (!__validate_port(xseg, portno))
829 port = &xseg->ports[portno];
830 return xq_head_to_tail(&port->free_queue, xseg->free_requests, nr, portno);
/* Pop a request index off the port's free queue and hand out the
 * corresponding request, stamped with the owning port and with its
 * latency timestamp reset. */
833 struct xseg_request *xseg_get_request(struct xseg *xseg, uint32_t portno)
835 struct xseg_request *req;
836 struct xseg_port *port;
838 if (!__validate_port(xseg, portno))
841 port = &xseg->ports[portno];
842 xqi = xq_pop_head(&port->free_queue, portno);
/* requests live in one array; the queue index doubles as the handle */
846 req = &xseg->requests[xqi];
847 req->portno = portno;
/* fresh request: no latency measurement yet (see __update_timestamp) */
850 req->timestamp.tv_sec = 0;
851 req->timestamp.tv_usec = 0;
/* Return a request to its port's free queue: reset the buffer layout
 * and fold any measured latency into the segment-wide counters.
 * Returns nonzero when the queue append failed (Noneidx). */
856 int xseg_put_request ( struct xseg *xseg,
858 struct xseg_request *xreq )
860 xqindex xqi = xreq - xseg->requests;
861 xreq->data = xreq->buffer;
862 xreq->datalen = xreq->bufferlen;
866 if (xreq->elapsed != 0) {
867 __lock_segment(xseg);
868 ++(xseg->counters.req_cnt);
/* NOTE(review): avg_req_lat accumulates a running sum here, not an
 * average — presumably divided by req_cnt at read time; verify. */
869 xseg->counters.avg_req_lat += xreq->elapsed;
870 __unlock_segment(xseg);
873 return xq_append_head(&xseg->ports[portno].free_queue, xqi, portno) == Noneidx;
876 int xseg_prep_request ( struct xseg_request *req,
877 uint32_t targetlen, uint64_t datalen )
879 if (targetlen + datalen > req->bufferlen)
882 req->data = req->buffer;
883 req->target = req->buffer + req->bufferlen - targetlen;
884 req->datalen = datalen;
885 req->targetlen = targetlen;
/* Stamp xreq with the current time; if it was already stamped, add the
 * interval since the previous stamp to xreq->elapsed (microseconds). */
889 static void __update_timestamp(struct xseg_request *xreq)
893 __get_current_time(&tv);
894 if (xreq->timestamp.tv_sec != 0)
896 * FIXME: Make xreq->elapsed timeval/timespec again to avoid the
899 xreq->elapsed += (tv.tv_sec - xreq->timestamp.tv_sec) * 1000000
900 + (tv.tv_usec - xreq->timestamp.tv_usec);
902 xreq->timestamp.tv_sec = tv.tv_sec;
903 xreq->timestamp.tv_usec = tv.tv_usec;
/* Queue xreq on the target port's request queue, starting (or
 * extending) its latency timer. Returns the queue serial, NoSerial on
 * failure. */
906 xserial xseg_submit ( struct xseg *xseg, uint32_t portno,
907 struct xseg_request *xreq )
909 xserial serial = NoSerial;
911 struct xseg_port *port;
912 if (!__validate_port(xseg, portno))
915 __update_timestamp(xreq);
917 port = &xseg->ports[portno];
/* the request's array index doubles as its queue handle */
918 xqi = xreq - xseg->requests;
919 serial = xq_append_tail(&port->request_queue, xqi, portno);
/* Dequeue a completed request from the port's reply queue, updating its
 * latency timestamp on the way out. */
924 struct xseg_request *xseg_receive(struct xseg *xseg, uint32_t portno)
927 struct xseg_port *port;
928 if (!__validate_port(xseg, portno))
931 port = &xseg->ports[portno];
932 xqi = xq_pop_head(&port->reply_queue, portno);
936 __update_timestamp(&xseg->requests[xqi]);
938 return xseg->requests + xqi;
/* Dequeue an incoming request from the port's request queue — the
 * server-side counterpart of xseg_submit(). */
941 struct xseg_request *xseg_accept(struct xseg *xseg, uint32_t portno)
944 struct xseg_port *port;
945 if (!__validate_port(xseg, portno))
948 port = &xseg->ports[portno];
949 xqi = xq_pop_head(&port->request_queue, portno);
953 return xseg->requests + xqi;
/* Queue a finished request on the port's reply queue. Returns the queue
 * serial, NoSerial on failure. */
956 xserial xseg_respond ( struct xseg *xseg, uint32_t portno,
957 struct xseg_request *xreq )
959 xserial serial = NoSerial;
961 struct xseg_port *port;
962 if (!__validate_port(xseg, portno))
965 port = &xseg->ports[portno];
966 xqi = xreq - xseg->requests;
967 serial = xq_append_tail(&port->reply_queue, xqi, portno);
/* Claim a port for the calling peer. When req is a valid port number,
 * presumably only that port is tried (force-binding); otherwise the
 * scan covers all ports and skips owned ones. The peer's driver is
 * enabled under the segment lock and its serial recorded in the port. */
973 struct xseg_port *xseg_bind_port(struct xseg *xseg, uint32_t req)
975 uint32_t portno, maxno, id = __get_id(), force;
976 struct xseg_port *port;
978 if (req >= xseg->config.nr_ports) {
980 maxno = xseg->config.nr_ports;
988 __lock_segment(xseg);
989 for (; portno < maxno; portno++) {
991 port = &xseg->ports[portno];
/* skip ports that already have an owner, unless force-binding */
992 if (port->owner && !force)
994 driver = __enable_driver(xseg, &xseg->priv->peer_type);
/* remember which peer type owns the port, by its shared serial */
997 port->peer_type = (uint64_t)driver;
1003 __unlock_segment(xseg);
/* One-time library initialization; delegates to __xseg_preinit(). */
int xseg_initialize(void)
{
	/* with or without lock ? */
	return __xseg_preinit();
}
/* Library teardown hook; currently a stub. */
1013 int xseg_finalize(void)
1015 /* finalize not supported yet */
/* xseg_get_obj flag: allocate a fresh batch when the free list runs
 * empty instead of failing. */
1019 #define X_ALLOC ((uint32_t) (1 << 0))
1022 * xseg -> address of malloced struct xseg, each peer takes on join
1023 * segment -> address of mmapped segment
/* Lock-free pop from the handler's free-object list. With X_ALLOC set,
 * an empty list triggers allocation of a fresh batch (minimum 64) under
 * the handler lock before retrying. Returns a segment-relative xptr. */
1027 xptr xseg_get_obj(struct xseg_object_handler * obj_h, uint32_t flags)
1029 struct xseg *segment = XPTR(obj_h->segment);
1030 struct xseg_object *obj;
1033 while (obj_h->list) {
1035 obj = XSEG_TAKE_PTR(list, segment);
/* CAS the list head forward; contention just retries the loop */
1037 if (__sync_bool_compare_and_swap(&obj_h->list, list, objptr)) {
1041 if (!(flags & X_ALLOC))
/* only one allocator at a time; lock losers re-scan the list */
1043 if (xlock_try_lock(&obj_h->lock, 1)) {
1044 //allocate minimum 64 objects
1045 xseg_alloc_obj(obj_h, 64);
1046 xlock_release(&obj_h->lock);
1051 void xseg_put_obj(struct xseg_object_handler * obj_h, struct xseg_object *obj)
1053 struct xseg *segment = XPTR(obj_h->segment);
1054 xptr list, objptr = XSEG_MAKE_PTR(obj, segment);
1058 } while(__sync_bool_compare_and_swap(&obj_h->list, list, objptr));
1061 uint64_t get_alloc_bytes(uint64_t bytes)
1063 return __get_alloc_bytes(bytes) - sizeof(struct free_space_header);
1066 uint64_t __get_alloc_bytes(uint64_t bytes)
1068 return __align(bytes + sizeof(struct free_space_header), 12);
1071 //should be called with object_handler lock held
/* Grow the handler's pool: carve nr objects (obj_h->size bytes each)
 * out of the segment heap, thread them into a chain, record each in the
 * 'allocated' hash (growing it on ERESIZE), then splice the chain onto
 * obj_h->list with a CAS loop. */
1072 int xseg_alloc_obj(struct xseg_object_handler *obj_h, uint64_t nr)
1074 struct xseg *segment = XPTR(&obj_h->segment);
1075 struct xseg_heap *heap = XSEG_TAKE_PTR(obj_h->heap, segment);
1076 uint64_t used, bytes = nr * obj_h->size;
1077 xptr objptr, mem = xseg_allocate(heap, bytes);
1078 struct xseg_object *obj;
1079 xhash_t *allocated = XSEG_TAKE_PTR(obj_h->allocated, segment);
/* usable payload may be smaller than requested (header/alignment) */
1085 bytes = get_alloc_bytes(bytes);
1087 while (used + obj_h->size < bytes) {
/* NOTE(review): 'xseg' is not declared in this function — other call
 * sites pass 'segment'; verify. */
1089 obj = XSEG_TAKE_PTR(objptr, xseg);
1090 used += obj_h->size;
1091 obj->magic = obj_h->magic;
1092 obj->size = obj_h->size;
1093 obj->next = xptr + used; //point to the next obj
1094 r = xhash_insert(allocated, objptr, objptr); //keep track of allocated objects
1096 if (r == -XHASH_ERESIZE) {
/* hash table full: allocate a bigger one, migrate, free the old */
1097 ul_t sizeshift = grow_size_shift(allocated);
1100 xptr newptr, oldptr;
1101 size = xhash_get_alloc_size(sizeshift);
1102 newptr = xseg_allocate(heap, size);
1104 xseg_free(heap, xptr);
1107 new = XSEG_TAKE_PTR(newptr, segment);
1108 xhash_resize(allocated, sizeshift, new);
1110 oldptr = XSEG_MAKE_PTR(allocated, segment);
1111 xseg_free(heap, oldptr);
1113 obj_h->allocated = XSEG_MAKE_PTR(allocated, segment);
1116 obj->next = 0; //list is null terminated
1118 //assert obj_h->list == 0
/* NOTE(review): 'ojbptr' looks like a typo for 'objptr', and
 * 'obj_handler' below for 'obj_h' — this cannot compile as written. */
1119 ojbptr = obj_h->list;
1120 }while(!__sync_bool_compare_and_swap(&obj_handler->list, objptr, xptr));
/* Bump-pointer allocation from the segment heap: atomically advance
 * heap->cur by the aligned gross size and return the payload xptr just
 * past the free-space header. */
1124 xptr xseg_allocate(struct xseg_heap *heap, uint64_t bytes)
1126 struct xseg *segment = XPTR(&heap->segment);
1127 struct xseg_free_space_header *fsh;
1130 bytes = __get_alloc_bytes(bytes);
/* NOTE(review): this reads like a space check, but as written it
 * compares space already USED (cur - start) against the request —
 * verify against heap->size. */
1132 if ((heap->cur - heap->start) > bytes)
/* NOTE(review): 'xseg_heap' and 'cur' are undeclared here; likely
 * meant 'heap->cur' and 'ret'. */
1134 ret = xseg_heap->cur;
1135 } while (!__sync_bool_compare_and_swap(&heap->cur, ret, (xptr) cur + bytes));
1137 fsh = (struct xseg_free_space_header *) XSEG_TAKE_PTR(segment, ret);
1139 ret += sizeof(struct xseg_free_space_header);
/* Return an allocation to the heap; the chunk's size is read back from
 * its free-space header. */
1143 void xseg_free(struct xseg_heap *heap, xptr ptr)
1145 struct xseg *segment = XPTR(&heap->segment);
1146 struct xseg_free_space_header *fsh;
/* NOTE(review): other call sites use XSEG_TAKE_PTR(ptr, segment) — the
 * argument order here looks swapped; verify. */
1147 uint64_t size = XSEG_TAKE_PTR(segment, ptr);
1148 //split space to objects
/* Initialize an object handler: record its magic and per-object size,
 * allocate and init its 'allocated' hash table, bind it to the segment,
 * and release its lock so it becomes usable. */
1151 int xseg_init_object_handler(struct xseg *xseg, struct xseg_object_handler *obj_h,
1152 uint32_t magic, uint64_t size, xptr heap)
1154 struct xseg_heap *xheap = XSEG_TAKE_PTR(heap, xseg->segment);
1155 obj_h->magic = magic;
1156 obj_h->obj_size = size;
1157 //use 18 as min size shift for all new hashtables, cause we align
1158 //memory to 4K. minsize 19 would give us two pages because of the
1159 //free memory header.
1160 mem = xseg_allocate(xheap, xhash_get_alloc_size(18));
1163 xhash = XSEG_TAKE_PTR(mem, xseg->segment);
1164 xhash_init(xhash, 18);
1165 obj_h->allocated = mem;
1169 XPTRSET(&obj_h->segment, xseg->segment);
1170 xlock_release(&obj_h->lock);
/* Set up a port's three queues (free, request, reply), each allocated
 * from the segment heap and sized for 512 request indices. On failure,
 * the queues allocated so far are freed again (unwind at the bottom). */
1174 int xseg_init_port(struct xseg *xseg, struct xseg_port *port)
1177 struct xseg_heap *heap = XSEG_TAKE_PTR(xseg->heap, xseg->segment);
1181 //each port starts with minimum 512 requests;
1182 //TODO make it configurable
1183 //TODO since max number of requests is not fixed
1184 // maybe we should make xqs expand when necessary
1185 uint64_t nr_reqs = 512;
1187 //how many bytes to allocate for a queue
1188 bytes = sizeof(struct xq) + nr_reqs*sizeof(xqindex);
1189 mem = xseg_allocate(heap, bytes);
1192 //how many did we got, and calculate what's left of buffer
1193 bytes = get_alloc_bytes(bytes) - sizeof(struct xq);
1194 port->free_queue = mem;
1195 //initialize queue with max nr it can hold
1196 q = (struct xq *)XSEG_TAKE_PTR(&port->free_queue, xseg->segment);
1197 buf = XSEG_TAKE_PTR(mem + sizeof(struct xq), xseg->segment);
1198 xq_init_empty(q, bytes/sizeof(xqindex), buf);
1200 //and for request queue
1201 bytes = sizeof(struct xq) + nr_reqs*sizeof(xqindex);
1202 mem = xseg_allocate(heap, bytes);
1205 bytes = get_alloc_bytes(bytes) - sizeof(struct xq);
1206 port->request_queue = mem;
1207 q = (struct xq *)XSEG_TAKE_PTR(&port->request_queue, xseg->segment);
1208 buf = XSEG_TAKE_PTR(mem + sizeof(struct xq), xseg->segment);
1209 xq_init_empty(q, bytes/sizeof(xqindex), buf);
1211 //and for reply_queue
1212 bytes = sizeof(struct xq) + nr_reqs*sizeof(xqindex);
1213 mem = xseg_allocate(heap, bytes);
1216 bytes = get_alloc_bytes(bytes) - sizeof(struct xq);
1217 port->reply_queue = mem;
1218 q = (struct xq *)XSEG_TAKE_PTR(&port->reply_queue, xseg->segment);
1219 buf = XSEG_TAKE_PTR(mem + sizeof(struct xq), xseg->segment);
1220 xq_init_empty(q, bytes/sizeof(xqindex), buf);
/* error unwind: free whichever queues were already allocated */
1225 xseg_free(heap, port->request_queue);
1226 port->request_queue = 0;
1228 xseg_free(heap, port->free_queue);
1229 port->free_queue = 0;
/* Release a port: free its three queues back to the segment heap and
 * return the port object to its handler's pool.
 * NOTE(review): 'obj_h' is not declared in the visible lines — it is
 * presumably the ports object handler; verify. */
1235 void xseg_put_port(struct xseg *xseg, struct xseg_port *port)
1237 struct xseg_heap *heap = XSEG_TAKE_PTR(xseg->heap, xseg->segment);
1239 if (port->request_queue) {
1240 xseg_free(heap, port->request_queue);
1241 port->request_queue = 0;
1243 if (port->free_queue) {
1244 xseg_free(heap, port->free_queue);
1245 port->free_queue = 0;
1247 if (port->reply_queue) {
1248 xseg_free(heap, port->reply_queue);
1249 port->reply_queue = 0;
1252 xseg_put_obj(obj_h, port);
1256 #include <linux/module.h>
1257 #include <xseg/xseg_exports.h>