Merge branch 'xseg-refactor' of ssh://ray.cslab.ece.ntua.gr/repos/archip into xseg...
[archipelago] / xseg / peers / kernel / xsegbd.c
index 5091755..c511496 100644 (file)
@@ -34,31 +34,34 @@ MODULE_LICENSE("GPL");
 static long sector_size = 0;
 static long blksize = 512;
 static int major = 0;
-static int max_nr_pending = 1024;
+static int max_dev = 1024;
 static char name[XSEGBD_SEGMENT_NAMELEN] = "xsegbd";
-static char spec[256] = "segdev:xsegbd:4:512:64:1024:12";
+static char spec[256] = "segdev:xsegbd:4:1024:12";
 
 module_param(sector_size, long, 0644);
 module_param(blksize, long, 0644);
-module_param(max_nr_pending, int, 0644);
+module_param(max_dev, int, 0644);
 module_param(major, int, 0644);
 module_param_string(name, name, sizeof(name), 0644);
 module_param_string(spec, spec, sizeof(spec), 0644);
 
-struct pending {
-       struct request *request;
-       struct completion *comp;
-       struct xsegbd_device *dev;
-};
-
-static struct xq blk_queue_pending;
-static struct pending *blk_req_pending;
-static unsigned int nr_pending;
-static spinlock_t __lock;
 static struct xsegbd xsegbd;
+static struct xsegbd_device **xsegbd_devices; /* indexed by portno */
 static DEFINE_MUTEX(xsegbd_mutex);
-static LIST_HEAD(xsegbd_dev_list);
-static DEFINE_SPINLOCK(xsegbd_dev_list_lock);
+static DEFINE_SPINLOCK(xsegbd_devices_lock);
+
+
+
+static struct xsegbd_device *__xsegbd_get_dev(unsigned long id)
+{
+       struct xsegbd_device *xsegbd_dev = NULL;
+
+       spin_lock(&xsegbd_devices_lock);
+       xsegbd_dev = xsegbd_devices[id];
+       spin_unlock(&xsegbd_devices_lock);
+
+       return xsegbd_dev;
+}
 
 /* ************************* */
 /* ***** sysfs helpers ***** */
@@ -84,7 +87,7 @@ static void xsegbd_put_dev(struct xsegbd_device *xsegbd_dev)
 /* ** XSEG Initialization ** */
 /* ************************* */
 
-static void xseg_callback(struct xseg *xseg, uint32_t portno);
+static void xseg_callback(uint32_t portno);
 
 int xsegbd_xseg_init(void)
 {
@@ -107,6 +110,7 @@ int xsegbd_xseg_init(void)
                XSEGLOG("WARNING: unexpected segment type '%s' vs 'segdev'",
                         xsegbd.config.type);
 
+       /* leave it here for now */
        XSEGLOG("joining segment");
        xsegbd.xseg = xseg_join(        xsegbd.config.type,
                                        xsegbd.config.name,
@@ -188,7 +192,7 @@ static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
        struct gendisk *disk;
        unsigned int max_request_size_bytes;
 
-       spin_lock_init(&xsegbd_dev->lock);
+       spin_lock_init(&xsegbd_dev->rqlock);
 
        xsegbd_dev->xsegbd = &xsegbd;
 
@@ -196,7 +200,10 @@ static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
        if (!xsegbd_dev->blk_queue)
                goto out;
 
-       blk_init_allocated_queue(xsegbd_dev->blk_queue, xseg_request_fn, &xsegbd_dev->lock);
+       if (!blk_init_allocated_queue(xsegbd_dev->blk_queue, 
+                       xseg_request_fn, &xsegbd_dev->rqlock))
+               goto outqueue;
+
        xsegbd_dev->blk_queue->queuedata = xsegbd_dev;
 
        blk_queue_flush(xsegbd_dev->blk_queue, REQ_FLUSH | REQ_FUA);
@@ -217,12 +224,7 @@ static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
        /* vkoukis says we don't need partitions */
        xsegbd_dev->gd = disk = alloc_disk(1);
        if (!disk)
-               /* FIXME: We call xsegbd_dev_release if something goes wrong, to cleanup
-                * disks/queues/etc.
-                * Would it be better to do the cleanup here, and conditionally cleanup
-                * in dev_release?
-                */
-               goto out;
+               goto outqueue;
 
        disk->major = xsegbd_dev->major;
        disk->first_minor = 0; // id * XSEGBD_MINORS;
@@ -233,23 +235,14 @@ static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
        snprintf(disk->disk_name, 32, "xsegbd%u", xsegbd_dev->id);
 
        ret = 0;
-       spin_lock_irq(&__lock);
-       if (nr_pending + xsegbd_dev->nr_requests > max_nr_pending)
-               ret = -ENOBUFS;
-       else
-               nr_pending += xsegbd_dev->nr_requests;
-       spin_unlock_irq(&__lock);
-
-       if (ret)
-               goto out;
-
+       
        /* allow a non-zero sector_size parameter to override the disk size */
        if (sector_size)
                xsegbd_dev->sectors = sector_size;
        else {
                ret = xsegbd_get_size(xsegbd_dev);
                if (ret)
-                       goto out;
+                       goto outdisk;
        }
 
        set_capacity(disk, xsegbd_dev->sectors);
@@ -258,13 +251,21 @@ static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
 
        return 0;
 
+
+outdisk:
+       put_disk(xsegbd_dev->gd);
+outqueue:
+       blk_cleanup_queue(xsegbd_dev->blk_queue);
 out:
+       xsegbd_dev->gd = NULL;
        return ret;
 }
 
 static void xsegbd_dev_release(struct device *dev)
 {
        struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
+       
+       xseg_cancel_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
 
        /* cleanup gendisk and blk_queue the right way */
        if (xsegbd_dev->gd) {
@@ -275,26 +276,22 @@ static void xsegbd_dev_release(struct device *dev)
                put_disk(xsegbd_dev->gd);
        }
 
-       /* xsegbd actually does not need use waiting. 
-        * maybe we use xseg_cancel_wait for clarity
-        * with xseg_segdev kernel driver convert this
-        * to a noop
-        */
-//     xseg_cancel_wait(xseg, xsegbd_dev->src_portno);
-
-       if (xseg_free_requests(xsegbd.xseg, xsegbd_dev->src_portno, xsegbd_dev->nr_requests) != 0)
-               XSEGLOG("Error trying to free requests!\n");
+//     if (xseg_free_requests(xsegbd_dev->xseg, 
+//                     xsegbd_dev->src_portno, xsegbd_dev->nr_requests) < 0)
+//             XSEGLOG("Error trying to free requests!\n");
 
-       WARN_ON(nr_pending < xsegbd_dev->nr_requests);
-       spin_lock_irq(&__lock);
-       nr_pending -= xsegbd_dev->nr_requests;
-       spin_unlock_irq(&__lock);
 
        unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
 
-       spin_lock(&xsegbd_dev_list_lock);
-       list_del_init(&xsegbd_dev->node);
-       spin_unlock(&xsegbd_dev_list_lock);
+       spin_lock(&xsegbd_devices_lock);
+       BUG_ON(xsegbd_devices[xsegbd_dev->src_portno] != xsegbd_dev);
+       xsegbd_devices[xsegbd_dev->src_portno] = NULL;
+       spin_unlock(&xsegbd_devices_lock);
+
+       if (xsegbd_dev->blk_req_pending)
+               kfree(xsegbd_dev->blk_req_pending);
+       xq_free(&xsegbd_dev->blk_queue_pending);
+
        kfree(xsegbd_dev);
 
        module_put(THIS_MODULE);
@@ -310,7 +307,7 @@ static void blk_to_xseg(struct xseg *xseg, struct xseg_request *xreq,
        struct bio_vec *bvec;
        struct req_iterator iter;
        uint64_t off = 0;
-       char *data = XSEG_TAKE_PTR(xreq->data, xseg->segment);
+       char *data = xseg_get_data(xseg, xreq);
        rq_for_each_segment(bvec, blkreq, iter) {
                char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
                memcpy(data + off, bdata, bvec->bv_len);
@@ -325,7 +322,7 @@ static void xseg_to_blk(struct xseg *xseg, struct xseg_request *xreq,
        struct bio_vec *bvec;
        struct req_iterator iter;
        uint64_t off = 0;
-       char *data = XSEG_TAKE_PTR(xreq->data, xseg->segment);
+       char *data = xseg_get_data(xseg, xreq);
        rq_for_each_segment(bvec, blkreq, iter) {
                char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
                memcpy(bdata, data + off, bvec->bv_len);
@@ -339,41 +336,71 @@ static void xseg_request_fn(struct request_queue *rq)
        struct xseg_request *xreq;
        struct xsegbd_device *xsegbd_dev = rq->queuedata;
        struct request *blkreq;
-       struct pending *pending;
+       struct xsegbd_pending *pending;
        xqindex blkreq_idx;
        char *target;
        uint64_t datalen;
+       xport p;
+       int r;
 
        for (;;) {
-               xreq = xseg_get_request(xsegbd.xseg, xsegbd_dev->src_portno);
+               blkreq_idx = Noneidx;
+               xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno, 
+                               xsegbd_dev->dst_portno, X_ALLOC);
                if (!xreq)
                        break;
 
+               blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending, 
+                                               xsegbd_dev->src_portno);
+               if (blkreq_idx == Noneidx)
+                       break;
+               
+               if (blkreq_idx >= xsegbd_dev->nr_requests) {
+                       XSEGLOG("blkreq_idx >= xsegbd_dev->nr_requests");
+                       BUG_ON(1);
+                       break;
+               }
+
                blkreq = blk_fetch_request(rq);
                if (!blkreq)
                        break;
 
                if (blkreq->cmd_type != REQ_TYPE_FS) {
+                       //we lose xreq here
                        XSEGLOG("non-fs cmd_type: %u. *shrug*", blkreq->cmd_type);
                        __blk_end_request_all(blkreq, 0);
+                       continue;
                }
 
-
                datalen = blk_rq_bytes(blkreq);
-               BUG_ON(xseg_prep_request(xsegbd.xseg, xreq, xsegbd_dev->targetlen, datalen));
-               BUG_ON(xreq->bufferlen - xsegbd_dev->targetlen < datalen);
+               r = xseg_prep_request(xsegbd_dev->xseg, xreq, 
+                                       xsegbd_dev->targetlen, datalen);
+               if (r < 0) {
+                       XSEGLOG("couldn't prep request");
+                       __blk_end_request_err(blkreq, r);
+                       BUG_ON(1);
+                       break;
+               }
+               r = -ENOMEM;
+               if (xreq->bufferlen - xsegbd_dev->targetlen < datalen){
+                       XSEGLOG("malformed req buffers");
+                       __blk_end_request_err(blkreq, r);
+                       BUG_ON(1);
+                       break;
+               }
 
-               target = XSEG_TAKE_PTR(xreq->target, xsegbd.xseg->segment);
+               target = xseg_get_target(xsegbd_dev->xseg, xreq);
                strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
-               blkreq_idx = xq_pop_head(&blk_queue_pending, 1);
-               BUG_ON(blkreq_idx == Noneidx);
-               pending = &blk_req_pending[blkreq_idx];
+
+               pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
                pending->dev = xsegbd_dev;
                pending->request = blkreq;
                pending->comp = NULL;
-               xreq->priv = (uint64_t)blkreq_idx;
+               
                xreq->size = datalen;
                xreq->offset = blk_rq_pos(blkreq) << 9;
+               xreq->priv = (uint64_t) blkreq_idx;
+
                /*
                if (xreq->offset >= (sector_size << 9))
                        XSEGLOG("sector offset: %lu > %lu, flush:%u, fua:%u",
@@ -390,24 +417,40 @@ static void xseg_request_fn(struct request_queue *rq)
 
                if (rq_data_dir(blkreq)) {
                        /* unlock for data transfers? */
-                       blk_to_xseg(xsegbd.xseg, xreq, blkreq);
+                       blk_to_xseg(xsegbd_dev->xseg, xreq, blkreq);
                        xreq->op = X_WRITE;
                } else {
                        xreq->op = X_READ;
                }
 
-               BUG_ON(xseg_submit(xsegbd.xseg, xsegbd_dev->dst_portno, xreq) == NoSerial);
-       }
 
-       WARN_ON(xseg_signal(xsegbd_dev->xsegbd->xseg, xsegbd_dev->dst_portno) < 0);
+               r = -EIO;
+               p = xseg_submit(xsegbd_dev->xseg, xreq, 
+                                       xsegbd_dev->src_portno, X_ALLOC);
+               if (p == NoPort) {
+                       XSEGLOG("couldn't submit req");
+                       BUG_ON(1);
+                       __blk_end_request_err(blkreq, r);
+                       break;
+               }
+               WARN_ON(xseg_signal(xsegbd_dev->xsegbd->xseg, p) < 0);
+       }
        if (xreq)
-               BUG_ON(xseg_put_request(xsegbd_dev->xsegbd->xseg, xsegbd_dev->src_portno, xreq) == NoSerial);
+               BUG_ON(xseg_put_request(xsegbd_dev->xsegbd->xseg, xreq, 
+                                       xsegbd_dev->src_portno) == -1);
+       if (blkreq_idx != Noneidx)
+               BUG_ON(xq_append_head(&xsegbd_dev->blk_queue_pending, 
+                               blkreq_idx, xsegbd_dev->src_portno) == Noneidx);
 }
 
 int update_dev_sectors_from_request(   struct xsegbd_device *xsegbd_dev,
                                        struct xseg_request *xreq       )
 {
        void *data;
+       if (!xreq) {
+               XSEGLOG("Invalid xreq");
+               return -EIO;
+       }
 
        if (xreq->state & XS_FAILED)
                return -ENOENT;
@@ -415,7 +458,11 @@ int update_dev_sectors_from_request(       struct xsegbd_device *xsegbd_dev,
        if (!(xreq->state & XS_SERVED))
                return -EIO;
 
-       data = XSEG_TAKE_PTR(xreq->data, xsegbd.xseg->segment);
+       data = xseg_get_data(xsegbd_dev->xseg, xreq);
+       if (!data) {
+               XSEGLOG("Invalid req data");
+               return -EIO;
+       }
        xsegbd_dev->sectors = *((uint64_t *) data) / 512ULL;
        return 0;
 }
@@ -426,76 +473,99 @@ static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev)
        char *target;
        uint64_t datalen;
        xqindex blkreq_idx;
-       struct pending *pending;
+       struct xsegbd_pending *pending;
        struct completion comp;
-       int ret = -EBUSY;
-
-       xreq = xseg_get_request(xsegbd.xseg, xsegbd_dev->src_portno);
+       xport p;
+       void *data;
+       int ret = -EBUSY, r;
+       xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno,
+                       xsegbd_dev->dst_portno, X_ALLOC);
        if (!xreq)
                goto out;
 
        datalen = sizeof(uint64_t);
-       BUG_ON(xseg_prep_request(xsegbd.xseg, xreq, xsegbd_dev->targetlen, datalen));
+       BUG_ON(xseg_prep_request(xsegbd_dev->xseg, xreq, xsegbd_dev->targetlen, datalen));
        BUG_ON(xreq->bufferlen - xsegbd_dev->targetlen < datalen);
 
        init_completion(&comp);
-       blkreq_idx = xq_pop_head(&blk_queue_pending, 1);
-       BUG_ON(blkreq_idx == Noneidx);
-       pending = &blk_req_pending[blkreq_idx];
+       blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending, 1);
+       if (blkreq_idx == Noneidx)
+               goto out;
+       
+       pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
        pending->dev = xsegbd_dev;
        pending->request = NULL;
        pending->comp = &comp;
-       xreq->priv = (uint64_t)blkreq_idx;
 
-       target = XSEG_TAKE_PTR(xreq->target, xsegbd.xseg->segment);
+       
+       xreq->priv = (uint64_t) blkreq_idx;
+
+       target = xseg_get_target(xsegbd_dev->xseg, xreq);
        strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
        xreq->size = datalen;
        xreq->offset = 0;
-
        xreq->op = X_INFO;
 
-       /* waiting is not needed.
-        * but it should be better to use xseg_prepare_wait
-        * and the xseg_segdev kernel driver, would be a no op
-        */
-//     port = &xsegbd.xseg->ports[xsegbd_dev->src_portno];
-//     port->waitcue = (uint64_t)(long)xsegbd_dev;
-
-       BUG_ON(xseg_submit(xsegbd.xseg, xsegbd_dev->dst_portno, xreq) == NoSerial);
-       WARN_ON(xseg_signal(xsegbd.xseg, xsegbd_dev->dst_portno) < 0);
-
+       xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
+       p = xseg_submit(xsegbd_dev->xseg, xreq, 
+                               xsegbd_dev->src_portno, X_ALLOC);
+       if (p == NoPort) {
+               XSEGLOG("couldn't submit request");
+               BUG_ON(1);
+               goto out_queue;
+       }
+       WARN_ON(xseg_signal(xsegbd_dev->xseg, p) < 0);
+       XSEGLOG("Before wait for completion, xreq %lx", (unsigned long) xreq);
        wait_for_completion_interruptible(&comp);
-       XSEGLOG("Woken up after wait_for_completion_interruptible()\n");
+       XSEGLOG("Woken up after wait_for_completion_interruptible(), xreq: %lx", (unsigned long) xreq);
        ret = update_dev_sectors_from_request(xsegbd_dev, xreq);
-       XSEGLOG("get_size: sectors = %ld\n", (long)xsegbd_dev->sectors);
+       //XSEGLOG("get_size: sectors = %ld\n", (long)xsegbd_dev->sectors);
 out:
-       BUG_ON(xseg_put_request(xsegbd.xseg, xsegbd_dev->src_portno, xreq) == NoSerial);
+       BUG_ON(xseg_put_request(xsegbd_dev->xseg, xreq, xsegbd_dev->src_portno) == -1);
        return ret;
+
+out_queue:
+       pending->dev = NULL;
+       pending->comp = NULL;
+       xq_append_head(&xsegbd_dev->blk_queue_pending, blkreq_idx, 1);
+       
+       goto out;
 }
 
-static void xseg_callback(struct xseg *xseg, uint32_t portno)
+static void xseg_callback(xport portno)
 {
-       struct xsegbd_device *xsegbd_dev = NULL, *old_dev = NULL;
+       struct xsegbd_device *xsegbd_dev;
        struct xseg_request *xreq;
        struct request *blkreq;
-       struct pending *pending;
+       struct xsegbd_pending *pending;
        unsigned long flags;
-       uint32_t blkreq_idx;
+       xqindex blkreq_idx, ridx;
        int err;
+       void *data;
+
+       xsegbd_dev  = __xsegbd_get_dev(portno);
+       if (!xsegbd_dev) {
+               XSEGLOG("portno: %u has no xsegbd device assigned", portno);
+               WARN_ON(1);
+               return;
+       }
 
        for (;;) {
-               xreq = xseg_receive(xseg, portno);
+               xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
+               xreq = xseg_receive(xsegbd_dev->xseg, portno);
                if (!xreq)
                        break;
 
-               /* we rely upon our peers to not have touched ->priv */
-               blkreq_idx = (uint64_t)xreq->priv;
-               if (blkreq_idx >= max_nr_pending) {
+               xseg_cancel_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
+
+               blkreq_idx = (xqindex) xreq->priv;
+               if (blkreq_idx >= xsegbd_dev->nr_requests) {
                        WARN_ON(1);
+                       //FIXME maybe put request?
                        continue;
                }
 
-               pending = &blk_req_pending[blkreq_idx];
+               pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
                if (pending->comp) {
                        /* someone is blocking on this request
                           and will handle it when we wake them up. */
@@ -508,39 +578,53 @@ static void xseg_callback(struct xseg *xseg, uint32_t portno)
                /* this is now treated as a block I/O request to end */
                blkreq = pending->request;
                pending->request = NULL;
-               xsegbd_dev = pending->dev;
+               if (xsegbd_dev != pending->dev) {
+                       //FIXME maybe put request?
+                       XSEGLOG("xsegbd_dev != pending->dev");
+                       BUG_ON(1);
+                       continue;
+               }
                pending->dev = NULL;
-               WARN_ON(!blkreq);
-
-               if ((xsegbd_dev != old_dev) && old_dev) {
-                       spin_lock_irqsave(&old_dev->lock, flags);
-                       xseg_request_fn(old_dev->blk_queue);
-                       spin_unlock_irqrestore(&old_dev->lock, flags);
+               if (!blkreq){
+                       //FIXME maybe put request?
+                       XSEGLOG("blkreq does not exist");
+                       BUG_ON(1);
+                       continue;
                }
 
-               old_dev = xsegbd_dev;
-
+               err = -EIO;
                if (!(xreq->state & XS_SERVED))
                        goto blk_end;
 
                if (xreq->serviced != blk_rq_bytes(blkreq))
                        goto blk_end;
 
-               /* unlock for data transfer? */
-               if (!rq_data_dir(blkreq))
-                       xseg_to_blk(xseg, xreq, blkreq);
-
                err = 0;
+               /* unlock for data transfer? */
+               if (!rq_data_dir(blkreq)){
+                       xseg_to_blk(xsegbd_dev->xseg, xreq, blkreq);
+               }       
 blk_end:
                blk_end_request_all(blkreq, err);
-               xq_append_head(&blk_queue_pending, blkreq_idx, 1);
-               BUG_ON(xseg_put_request(xseg, xreq->portno, xreq) == NoSerial);
+               
+               ridx = xq_append_head(&xsegbd_dev->blk_queue_pending, 
+                                       blkreq_idx, xsegbd_dev->src_portno);
+               if (ridx == Noneidx) {
+                       XSEGLOG("couldn't append blkreq_idx");
+                       WARN_ON(1);
+               }
+
+               if (xseg_put_request(xsegbd_dev->xseg, xreq, 
+                                               xsegbd_dev->src_portno) < 0){
+                       XSEGLOG("couldn't put req");
+                       BUG_ON(1);
+               }
        }
 
        if (xsegbd_dev) {
-               spin_lock_irqsave(&xsegbd_dev->lock, flags);
+               spin_lock_irqsave(&xsegbd_dev->rqlock, flags);
                xseg_request_fn(xsegbd_dev->blk_queue);
-               spin_unlock_irqrestore(&xsegbd_dev->lock, flags);
+               spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
        }
 }
 
@@ -630,6 +714,40 @@ out:
        return ret;
 }
 
+static ssize_t xsegbd_cleanup(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf,
+                                       size_t size)
+{
+       struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
+       int ret = size, i;
+       struct request *blkreq = NULL;
+       struct xsegbd_pending *pending = NULL;
+       struct completion *comp = NULL;
+
+       mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
+       for (i = 0; i < xsegbd_dev->nr_requests; i++) {
+               xlock_acquire(&xsegbd_dev->blk_queue_pending.lock, 
+                               xsegbd_dev->src_portno);
+               if (!__xq_check(&xsegbd_dev->blk_queue_pending, i)) {
+                       pending = &xsegbd_dev->blk_req_pending[i];
+                       blkreq = pending->request;
+                       pending->request = NULL;
+                       comp = pending->comp;
+                       pending->comp = NULL;
+                       if (blkreq)
+                               blk_end_request_all(blkreq, -EIO);
+                       if (comp)
+                               complete(comp);
+                       __xq_append_tail(&xsegbd_dev->blk_queue_pending, i);
+               }
+               xlock_release(&xsegbd_dev->blk_queue_pending.lock);
+       }
+
+       mutex_unlock(&xsegbd_mutex);
+       return ret;
+}
+
 static DEVICE_ATTR(size, S_IRUGO, xsegbd_size_show, NULL);
 static DEVICE_ATTR(major, S_IRUGO, xsegbd_major_show, NULL);
 static DEVICE_ATTR(srcport, S_IRUGO, xsegbd_srcport_show, NULL);
@@ -638,6 +756,7 @@ static DEVICE_ATTR(id , S_IRUGO, xsegbd_id_show, NULL);
 static DEVICE_ATTR(reqs , S_IRUGO, xsegbd_reqs_show, NULL);
 static DEVICE_ATTR(target, S_IRUGO, xsegbd_target_show, NULL);
 static DEVICE_ATTR(refresh , S_IWUSR, NULL, xsegbd_image_refresh);
+static DEVICE_ATTR(cleanup , S_IWUSR, NULL, xsegbd_cleanup);
 
 static struct attribute *xsegbd_attrs[] = {
        &dev_attr_size.attr,
@@ -648,6 +767,7 @@ static struct attribute *xsegbd_attrs[] = {
        &dev_attr_reqs.attr,
        &dev_attr_target.attr,
        &dev_attr_refresh.attr,
+       &dev_attr_cleanup.attr,
        NULL
 };
 
@@ -707,10 +827,8 @@ static void xsegbd_bus_del_dev(struct xsegbd_device *xsegbd_dev)
 static ssize_t xsegbd_add(struct bus_type *bus, const char *buf, size_t count)
 {
        struct xsegbd_device *xsegbd_dev;
-       struct xseg_port *xport;
+       struct xseg_port *port;
        ssize_t ret = -ENOMEM;
-       int new_id = 0;
-       struct list_head *tmp;
 
        if (!try_module_get(THIS_MODULE))
                return -ENODEV;
@@ -719,7 +837,7 @@ static ssize_t xsegbd_add(struct bus_type *bus, const char *buf, size_t count)
        if (!xsegbd_dev)
                goto out;
 
-       spin_lock_init(&xsegbd_dev->lock);
+       spin_lock_init(&xsegbd_dev->rqlock);
        INIT_LIST_HEAD(&xsegbd_dev->node);
 
        /* parse cmd */
@@ -731,27 +849,14 @@ static ssize_t xsegbd_add(struct bus_type *bus, const char *buf, size_t count)
        }
        xsegbd_dev->targetlen = strlen(xsegbd_dev->target);
 
-       spin_lock(&xsegbd_dev_list_lock);
-
-       list_for_each(tmp, &xsegbd_dev_list) {
-               struct xsegbd_device *entry;
-
-               entry = list_entry(tmp, struct xsegbd_device, node);
-
-               if (entry->src_portno == xsegbd_dev->src_portno) {
-                       ret = -EINVAL;
-                       goto out_unlock;
-               }
-
-               if (entry->id >= new_id)
-                       new_id = entry->id + 1;
+       spin_lock(&xsegbd_devices_lock);
+       if (xsegbd_devices[xsegbd_dev->src_portno] != NULL) {
+               ret = -EINVAL;
+               goto out_unlock;
        }
-
-       xsegbd_dev->id = new_id;
-
-       list_add_tail(&xsegbd_dev->node, &xsegbd_dev_list);
-
-       spin_unlock(&xsegbd_dev_list_lock);
+       xsegbd_devices[xsegbd_dev->src_portno] = xsegbd_dev;
+       xsegbd_dev->id = xsegbd_dev->src_portno;
+       spin_unlock(&xsegbd_devices_lock);
 
        XSEGLOG("registering block device major %d", major);
        ret = register_blkdev(major, XSEGBD_NAME);
@@ -767,46 +872,77 @@ static ssize_t xsegbd_add(struct bus_type *bus, const char *buf, size_t count)
        if (ret)
                goto out_blkdev;
 
+       if (!xq_alloc_seq(&xsegbd_dev->blk_queue_pending, 
+                               xsegbd_dev->nr_requests,
+                               xsegbd_dev->nr_requests))
+               goto out_bus;
+
+       xsegbd_dev->blk_req_pending = kzalloc(
+                       xsegbd_dev->nr_requests *sizeof(struct xsegbd_pending),
+                                  GFP_KERNEL);
+       if (!xsegbd_dev->blk_req_pending)
+               goto out_freeq;
+
+       
+       XSEGLOG("joining segment");
+       //FIXME use xsebd module config for now
+       xsegbd_dev->xseg = xseg_join(   xsegbd.config.type,
+                                       xsegbd.config.name,
+                                       "segdev",
+                                       xseg_callback           );
+       if (!xsegbd_dev->xseg)
+               goto out_freepending;
+       
+
        XSEGLOG("binding to source port %u (destination %u)",
                        xsegbd_dev->src_portno, xsegbd_dev->dst_portno);
-       xport = xseg_bind_port(xsegbd.xseg, xsegbd_dev->src_portno);
-       if (!xport) {
+       port = xseg_bind_port(xsegbd_dev->xseg, xsegbd_dev->src_portno);
+       if (!port) {
                XSEGLOG("cannot bind to port");
                ret = -EFAULT;
 
-               goto out_bus;
+               goto out_xseg;
        }
-       /* make sure we don't get any requests until we're ready to handle them */
-       xport->waitcue = (long) NULL;
-
-       XSEGLOG("allocating %u requests", xsegbd_dev->nr_requests);
-       if (xseg_alloc_requests(xsegbd.xseg, xsegbd_dev->src_portno, xsegbd_dev->nr_requests)) {
-               XSEGLOG("cannot allocate requests");
+       
+       if (xsegbd_dev->src_portno != xseg_portno(xsegbd_dev->xseg, port)) {
+               XSEGLOG("portno != xsegbd_dev->src_portno");
+               BUG_ON(1);
                ret = -EFAULT;
-
-               goto out_bus;
+               goto out_xseg;
        }
+       
+       /* make sure we don't get any requests until we're ready to handle them */
+       xseg_cancel_wait(xsegbd_dev->xseg, xseg_portno(xsegbd_dev->xseg, port));
 
        ret = xsegbd_dev_init(xsegbd_dev);
        if (ret)
-               goto out_bus;
+               goto out_xseg;
 
+       xseg_prepare_wait(xsegbd_dev->xseg, xseg_portno(xsegbd_dev->xseg, port));
        return count;
 
+out_xseg:
+       xseg_leave(xsegbd_dev->xseg);
+       
+out_freepending:
+       kfree(xsegbd_dev->blk_req_pending);
+
+out_freeq:
+       xq_free(&xsegbd_dev->blk_queue_pending);
+
 out_bus:
        xsegbd_bus_del_dev(xsegbd_dev);
-
        return ret;
 
 out_blkdev:
        unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
 
 out_delentry:
-       spin_lock(&xsegbd_dev_list_lock);
-       list_del_init(&xsegbd_dev->node);
+       spin_lock(&xsegbd_devices_lock);
+       xsegbd_devices[xsegbd_dev->src_portno] = NULL;
 
 out_unlock:
-       spin_unlock(&xsegbd_dev_list_lock);
+       spin_unlock(&xsegbd_devices_lock);
 
 out_dev:
        kfree(xsegbd_dev);
@@ -815,24 +951,6 @@ out:
        return ret;
 }
 
-static struct xsegbd_device *__xsegbd_get_dev(unsigned long id)
-{
-       struct list_head *tmp;
-       struct xsegbd_device *xsegbd_dev;
-
-
-       spin_lock(&xsegbd_dev_list_lock);
-       list_for_each(tmp, &xsegbd_dev_list) {
-               xsegbd_dev = list_entry(tmp, struct xsegbd_device, node);
-               if (xsegbd_dev->id == id) {
-                       spin_unlock(&xsegbd_dev_list_lock);
-                       return xsegbd_dev;
-               }
-       }
-       spin_unlock(&xsegbd_dev_list_lock);
-       return NULL;
-}
-
 static ssize_t xsegbd_remove(struct bus_type *bus, const char *buf, size_t count)
 {
        struct xsegbd_device *xsegbd_dev = NULL;
@@ -855,7 +973,6 @@ static ssize_t xsegbd_remove(struct bus_type *bus, const char *buf, size_t count
                ret = -ENOENT;
                goto out_unlock;
        }
-
        xsegbd_bus_del_dev(xsegbd_dev);
 
 out_unlock:
@@ -898,18 +1015,16 @@ static void xsegbd_sysfs_cleanup(void)
 static int __init xsegbd_init(void)
 {
        int ret = -ENOMEM;
-
-       if (!xq_alloc_seq(&blk_queue_pending, max_nr_pending, max_nr_pending))
+       xsegbd_devices = kzalloc(max_dev * sizeof(struct xsegbd_device *), GFP_KERNEL);
+       if (!xsegbd_devices)
                goto out;
 
-       blk_req_pending = kzalloc(sizeof(struct pending) * max_nr_pending, GFP_KERNEL);
-       if (!blk_req_pending)
-               goto out_queue;
+       spin_lock_init(&xsegbd_devices_lock);
 
        ret = -ENOSYS;
        ret = xsegbd_xseg_init();
        if (ret)
-               goto out_pending;
+               goto out_free;
 
        ret = xsegbd_sysfs_init();
        if (ret)
@@ -922,10 +1037,10 @@ out:
 
 out_xseg:
        xsegbd_xseg_quit();
-out_pending:
-       kfree(blk_req_pending);
-out_queue:
-       xq_free(&blk_queue_pending);
+       
+out_free:
+       kfree(xsegbd_devices);
+
        goto out;
 }