add support for configurable max allocated requests and local req cache
[archipelago] / xseg / peers / kernel / xsegbd.c
index 912454c..74d7809 100644
 #include <linux/bio.h>
 #include <linux/device.h>
 #include <linux/completion.h>
-
+#include <linux/wait.h>
 #include <sys/kernel/segdev.h>
 #include "xsegbd.h"
+#include <xseg/protocol.h>
 
 #define XSEGBD_MINORS 1
 /* define max request size to be used in xsegbd */
 //FIXME should we make this 4MB instead of 256KB ?
-#define XSEGBD_MAX_REQUEST_SIZE 262144U
+//#define XSEGBD_MAX_REQUEST_SIZE 262144U
+#define XSEGBD_MAX_REQUEST_SIZE 4194304U
 
 MODULE_DESCRIPTION("xsegbd");
 MODULE_AUTHOR("XSEG");
@@ -45,15 +47,13 @@ module_param(major, int, 0644);
 module_param_string(name, name, sizeof(name), 0644);
 module_param_string(spec, spec, sizeof(spec), 0644);
 
-//static spinlock_t __lock;
 static struct xsegbd xsegbd;
 static struct xsegbd_device **xsegbd_devices; /* indexed by portno */
 static DEFINE_MUTEX(xsegbd_mutex);
 static DEFINE_SPINLOCK(xsegbd_devices_lock);
 
 
-
-static struct xsegbd_device *__xsegbd_get_dev(unsigned long id)
+struct xsegbd_device *__xsegbd_get_dev(unsigned long id)
 {
        struct xsegbd_device *xsegbd_dev = NULL;
 
@@ -88,7 +88,7 @@ static void xsegbd_put_dev(struct xsegbd_device *xsegbd_dev)
 /* ** XSEG Initialization ** */
 /* ************************* */
 
-static void xseg_callback(struct xseg *xseg, uint32_t portno);
+static void xseg_callback(uint32_t portno);
 
 int xsegbd_xseg_init(void)
 {
@@ -186,6 +186,7 @@ static const struct block_device_operations xsegbd_ops = {
 
 static void xseg_request_fn(struct request_queue *rq);
 static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev);
+static int xsegbd_mapclose(struct xsegbd_device *xsegbd_dev);
 
 static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
 {
@@ -197,11 +198,11 @@ static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
 
        xsegbd_dev->xsegbd = &xsegbd;
 
-       xsegbd_dev->blk_queue = blk_alloc_queue(GFP_KERNEL);
+       /* allocates and initializes queue */
+       xsegbd_dev->blk_queue = blk_init_queue(xseg_request_fn, &xsegbd_dev->rqlock);
        if (!xsegbd_dev->blk_queue)
                goto out;
 
-       blk_init_allocated_queue(xsegbd_dev->blk_queue, xseg_request_fn, &xsegbd_dev->rqlock);
        xsegbd_dev->blk_queue->queuedata = xsegbd_dev;
 
        blk_queue_flush(xsegbd_dev->blk_queue, REQ_FLUSH | REQ_FUA);
@@ -209,10 +210,11 @@ static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
        blk_queue_physical_block_size(xsegbd_dev->blk_queue, blksize);
        blk_queue_bounce_limit(xsegbd_dev->blk_queue, BLK_BOUNCE_ANY);
        
-       //blk_queue_max_segments(dev->blk_queue, 512);
 
        max_request_size_bytes = XSEGBD_MAX_REQUEST_SIZE;
        blk_queue_max_hw_sectors(xsegbd_dev->blk_queue, max_request_size_bytes >> 9);
+//     blk_queue_max_sectors(xsegbd_dev->blk_queue, max_request_size_bytes >> 10);
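+       /* 1024 segments are enough to cover a 4MB request, assuming 4KB
+        * pages (1024 * 4096 == 4194304 == XSEGBD_MAX_REQUEST_SIZE) */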
+       blk_queue_max_segments(xsegbd_dev->blk_queue, 1024);
        blk_queue_max_segment_size(xsegbd_dev->blk_queue, max_request_size_bytes);
        blk_queue_io_min(xsegbd_dev->blk_queue, max_request_size_bytes);
        blk_queue_io_opt(xsegbd_dev->blk_queue, max_request_size_bytes);
@@ -222,11 +224,6 @@ static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
        /* vkoukis says we don't need partitions */
        xsegbd_dev->gd = disk = alloc_disk(1);
        if (!disk)
-               /* FIXME: We call xsegbd_dev_release if something goes wrong, to cleanup
-                * disks/queues/etc.
-                * Would it be better to do the cleanup here, and conditionally cleanup
-                * in dev_release?
-                */
                goto out;
 
        disk->major = xsegbd_dev->major;
@@ -238,7 +235,7 @@ static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
        snprintf(disk->disk_name, 32, "xsegbd%u", xsegbd_dev->id);
 
        ret = 0;
-       
+
        /* allow a non-zero sector_size parameter to override the disk size */
        if (sector_size)
                xsegbd_dev->sectors = sector_size;
@@ -252,9 +249,8 @@ static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
        XSEGLOG("xsegbd active...");
        add_disk(disk); /* immediately activates the device */
 
-       return 0;
-
 out:
+       /* on error, everything is cleaned up in xsegbd_dev_release */
        return ret;
 }
 
@@ -262,40 +258,46 @@ static void xsegbd_dev_release(struct device *dev)
 {
        struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
 
        /* cleanup gendisk and blk_queue the right way */
        if (xsegbd_dev->gd) {
                if (xsegbd_dev->gd->flags & GENHD_FL_UP)
                        del_gendisk(xsegbd_dev->gd);
 
-               blk_cleanup_queue(xsegbd_dev->blk_queue);
-               put_disk(xsegbd_dev->gd);
+               xsegbd_mapclose(xsegbd_dev);
        }
 
-       /* xsegbd actually does not need to use waiting. 
-        * maybe we can use xseg_cancel_wait for clarity
-        * with the xseg_segdev kernel driver to convert 
-        * this to a noop
-        */
-       xseg_cancel_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
-
-       if (xseg_free_requests(xsegbd_dev->xseg, 
-                       xsegbd_dev->src_portno, xsegbd_dev->nr_requests) < 0)
-               XSEGLOG("Error trying to free requests!\n");
-
-
-       unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
-
        spin_lock(&xsegbd_devices_lock);
        BUG_ON(xsegbd_devices[xsegbd_dev->src_portno] != xsegbd_dev);
        xsegbd_devices[xsegbd_dev->src_portno] = NULL;
        spin_unlock(&xsegbd_devices_lock);
 
-       if (xsegbd_dev->blk_req_pending)
+       XSEGLOG("releasing id: %d", xsegbd_dev->id);
+//     xseg_cancel_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
+       xseg_quit_local_signal(xsegbd_dev->xseg, xsegbd_dev->src_portno);
+
+       if (xsegbd_dev->blk_queue)
+               blk_cleanup_queue(xsegbd_dev->blk_queue);
+       if (xsegbd_dev->gd)
+               put_disk(xsegbd_dev->gd);
+
+//     if (xseg_free_requests(xsegbd_dev->xseg, 
+//                     xsegbd_dev->src_portno, xsegbd_dev->nr_requests) < 0)
+//             XSEGLOG("Error trying to free requests!\n");
+
+       if (xsegbd_dev->xseg){
+               xseg_leave(xsegbd_dev->xseg);
+               xsegbd_dev->xseg = NULL;
+       }
+
+       unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
+
+       if (xsegbd_dev->blk_req_pending){
                kfree(xsegbd_dev->blk_req_pending);
+               xsegbd_dev->blk_req_pending = NULL;
+       }
        xq_free(&xsegbd_dev->blk_queue_pending);
-
        kfree(xsegbd_dev);
-
        module_put(THIS_MODULE);
 }
 
@@ -344,8 +346,18 @@ static void xseg_request_fn(struct request_queue *rq)
        uint64_t datalen;
        xport p;
        int r;
+       unsigned long flags;
 
+       spin_unlock_irq(&xsegbd_dev->rqlock);
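+       /* the block layer calls request_fn with rqlock held; it is dropped
+        * above so the xseg calls run unlocked, and retaken only around
+        * blk_fetch_request() and just before returning */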
        for (;;) {
+               if (current_thread_info()->preempt_count || irqs_disabled()){
+                       XSEGLOG("Current thread preempt_count: %d, irqs_disabled(): %d",
+                                       current_thread_info()->preempt_count, (int)irqs_disabled());
+               }
+               //XSEGLOG("Priority: %d", current_thread_info()->task->prio);
+               //XSEGLOG("Static priority: %d", current_thread_info()->task->static_prio);
+               //XSEGLOG("Normal priority: %d", current_thread_info()->task->normal_prio);
+               //XSEGLOG("Rt_priority: %u", current_thread_info()->task->rt_priority);
                blkreq_idx = Noneidx;
                xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno, 
                                xsegbd_dev->dst_portno, X_ALLOC);
@@ -357,29 +369,62 @@ static void xseg_request_fn(struct request_queue *rq)
                if (blkreq_idx == Noneidx)
                        break;
 
+               if (blkreq_idx >= xsegbd_dev->nr_requests) {
+                       XSEGLOG("blkreq_idx >= xsegbd_dev->nr_requests");
+                       BUG_ON(1);
+                       break;
+               }
+
+               spin_lock_irqsave(&xsegbd_dev->rqlock, flags);
                blkreq = blk_fetch_request(rq);
-               if (!blkreq)
+               if (!blkreq){
+                       spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
                        break;
+               }
 
                if (blkreq->cmd_type != REQ_TYPE_FS) {
+                       //FIXME we lose xreq here
                        XSEGLOG("non-fs cmd_type: %u. *shrug*", blkreq->cmd_type);
                        __blk_end_request_all(blkreq, 0);
+                       spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
+                       continue;
+               }
+               spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
+               if (current_thread_info()->preempt_count || irqs_disabled()){
+                       XSEGLOG("Current thread preempt_count: %d, irqs_disabled(): %d",
+                                       current_thread_info()->preempt_count, (int)irqs_disabled());
                }
 
                datalen = blk_rq_bytes(blkreq);
-               BUG_ON(xseg_prep_request(xsegbd_dev->xseg, xreq, 
-                                       xsegbd_dev->targetlen, datalen));
-               BUG_ON(xreq->bufferlen - xsegbd_dev->targetlen < datalen);
+               r = xseg_prep_request(xsegbd_dev->xseg, xreq, 
+                                       xsegbd_dev->targetlen, datalen);
+               if (r < 0) {
+                       XSEGLOG("couldn't prep request");
+                       blk_end_request_err(blkreq, r);
+                       BUG_ON(1);
+                       break;
+               }
+               r = -ENOMEM;
+               if (xreq->bufferlen - xsegbd_dev->targetlen < datalen){
+                       XSEGLOG("malformed req buffers");
+                       blk_end_request_err(blkreq, r);
+                       BUG_ON(1);
+                       break;
+               }
 
                target = xseg_get_target(xsegbd_dev->xseg, xreq);
                strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
-               BUG_ON(blkreq_idx == Noneidx);
+
                pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
                pending->dev = xsegbd_dev;
                pending->request = blkreq;
                pending->comp = NULL;
+
                xreq->size = datalen;
                xreq->offset = blk_rq_pos(blkreq) << 9;
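+               /* stash the pending-slot index in the request itself; this
+                * replaces the old xseg_set_req_data()/reqdatalock scheme */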
+               xreq->priv = (uint64_t) blkreq_idx;
+
                /*
                if (xreq->offset >= (sector_size << 9))
                        XSEGLOG("sector offset: %lu > %lu, flush:%u, fua:%u",
@@ -394,32 +439,24 @@ static void xseg_request_fn(struct request_queue *rq)
                if (blkreq->cmd_flags & REQ_FUA)
                        xreq->flags |= XF_FUA;
 
-               XSEGLOG("xreq: %lx size: %llu offset: %llu, blkreq_idx: %llu", 
-                               xreq, xreq->size, xreq->offset, blkreq_idx);
-
                if (rq_data_dir(blkreq)) {
-                       /* unlock for data transfers? */
                        blk_to_xseg(xsegbd_dev->xseg, xreq, blkreq);
-                       XSEGLOG("xreq: %lx size: %llu offset: %llu, blkreq_idx: %llu completed blk_to_xseg", 
-                               xreq, xreq->size, xreq->offset, blkreq_idx);
                        xreq->op = X_WRITE;
                } else {
                        xreq->op = X_READ;
                }
 
-               //maybe put this in loop start, and on break, 
-               //just do xseg_get_req_data
-               spin_lock(&xsegbd_dev->reqdatalock);
-               r = xseg_set_req_data(xsegbd_dev->xseg, xreq, (void *) blkreq_idx);
-               spin_unlock(&xsegbd_dev->reqdatalock);
-               BUG_ON(r < 0);
-               XSEGLOG("xreq: %lx size: %llu offset: %llu, blkreq_idx: %llu set req data", 
-                               xreq, xreq->size, xreq->offset, blkreq_idx);
-
-               BUG_ON((p = xseg_submit(xsegbd_dev->xseg, xreq, 
-                                       xsegbd_dev->src_portno, X_ALLOC)) == NoPort);
-               XSEGLOG("xreq: %lx size: %llu offset: %llu, blkreq_idx: %llu submitted", 
-                               xreq, xreq->size, xreq->offset, blkreq_idx);
+
+//             XSEGLOG("%s : %lu (%lu)", xsegbd_dev->target, xreq->offset, xreq->datalen);
+               r = -EIO;
+               p = xseg_submit(xsegbd_dev->xseg, xreq, 
+                                       xsegbd_dev->src_portno, X_ALLOC);
+               if (p == NoPort) {
+                       XSEGLOG("couldn't submit req");
+                       WARN_ON(1);
+                       blk_end_request_err(blkreq, r);
+                       break;
+               }
                WARN_ON(xseg_signal(xsegbd_dev->xsegbd->xseg, p) < 0);
        }
        if (xreq)
@@ -427,13 +464,18 @@ static void xseg_request_fn(struct request_queue *rq)
                                        xsegbd_dev->src_portno) == -1);
        if (blkreq_idx != Noneidx)
                BUG_ON(xq_append_head(&xsegbd_dev->blk_queue_pending, 
-                                       blkreq_idx, xsegbd_dev->src_portno) == Noneidx);
+                               blkreq_idx, xsegbd_dev->src_portno) == Noneidx);
+       spin_lock_irq(&xsegbd_dev->rqlock);
 }
 
 int update_dev_sectors_from_request(   struct xsegbd_device *xsegbd_dev,
                                        struct xseg_request *xreq       )
 {
        void *data;
+       if (!xreq) {
+               XSEGLOG("Invalid xreq");
+               return -EIO;
+       }
 
        if (xreq->state & XS_FAILED)
                return -ENOENT;
@@ -442,6 +484,14 @@ int update_dev_sectors_from_request(       struct xsegbd_device *xsegbd_dev,
                return -EIO;
 
+       if (!xsegbd_dev) {
+               XSEGLOG("Invalid xsegbd_dev");
+               return -ENOENT;
+       }
        data = xseg_get_data(xsegbd_dev->xseg, xreq);
+       if (!data) {
+               XSEGLOG("Invalid req data");
+               return -EIO;
+       }
        xsegbd_dev->sectors = *((uint64_t *) data) / 512ULL;
        return 0;
 }
@@ -450,115 +500,153 @@ static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev)
 {
        struct xseg_request *xreq;
        char *target;
-       uint64_t datalen;
        xqindex blkreq_idx;
        struct xsegbd_pending *pending;
        struct completion comp;
        xport p;
-       void *data;
-       int ret = -EBUSY, r;
+       int ret = -EBUSY;
+
        xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno,
                        xsegbd_dev->dst_portno, X_ALLOC);
        if (!xreq)
                goto out;
 
-       datalen = sizeof(uint64_t);
-       BUG_ON(xseg_prep_request(xsegbd_dev->xseg, xreq, xsegbd_dev->targetlen, datalen));
-       BUG_ON(xreq->bufferlen - xsegbd_dev->targetlen < datalen);
+       BUG_ON(xseg_prep_request(xsegbd_dev->xseg, xreq, xsegbd_dev->targetlen, 
+                               sizeof(struct xseg_reply_info)));
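+       /* the X_INFO reply is expected to carry a struct xseg_reply_info
+        * (hence the new <xseg/protocol.h> include) holding the target size */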
 
        init_completion(&comp);
        blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending, 1);
        if (blkreq_idx == Noneidx)
-               goto out;
-       
+               goto out_put;
+
        pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
        pending->dev = xsegbd_dev;
        pending->request = NULL;
        pending->comp = &comp;
 
-       
-       spin_lock(&xsegbd_dev->reqdatalock);
-       r = xseg_set_req_data(xsegbd_dev->xseg, xreq, (void *) blkreq_idx);
-       spin_unlock(&xsegbd_dev->reqdatalock);
-       if (r < 0)
-               goto out_queue;
-       XSEGLOG("for req: %lx, set data %llu (lx: %lx)", xreq, blkreq_idx, (void *) blkreq_idx);
+
+       xreq->priv = (uint64_t) blkreq_idx;
 
        target = xseg_get_target(xsegbd_dev->xseg, xreq);
        strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
-       xreq->size = datalen;
+       xreq->size = xreq->datalen;
        xreq->offset = 0;
-
        xreq->op = X_INFO;
 
-       /* waiting is not needed.
-        * but it should be better to use xseg_prepare_wait
-        * and the xseg_segdev kernel driver, would be a no op
-        */
-
        xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
        p = xseg_submit(xsegbd_dev->xseg, xreq, 
                                xsegbd_dev->src_portno, X_ALLOC);
-       BUG_ON(p == NoPort);
        if ( p == NoPort) {
-               goto out_data;
+               XSEGLOG("couldn't submit request");
+               BUG_ON(1);
+               goto out_queue;
        }
        WARN_ON(xseg_signal(xsegbd_dev->xseg, p) < 0);
-
+       XSEGLOG("Before wait for completion, comp %lx [%llu]", (unsigned long) pending->comp, (unsigned long long) blkreq_idx);
        wait_for_completion_interruptible(&comp);
-       XSEGLOG("Woken up after wait_for_completion_interruptible()\n");
+       XSEGLOG("Woken up after wait_for_completion_interruptible(), comp: %lx [%llu]", (unsigned long) pending->comp, (unsigned long long) blkreq_idx);
        ret = update_dev_sectors_from_request(xsegbd_dev, xreq);
        XSEGLOG("get_size: sectors = %ld\n", (long)xsegbd_dev->sectors);
+
+out_queue:
+       pending->dev = NULL;
+       pending->comp = NULL;
+       xq_append_head(&xsegbd_dev->blk_queue_pending, blkreq_idx, 1);
+out_put:
+       BUG_ON(xseg_put_request(xsegbd_dev->xseg, xreq, xsegbd_dev->src_portno) == -1);
 out:
-       BUG_ON(xseg_put_request(xsegbd_dev->xseg, xreq, xsegbd_dev->src_portno) < 0);
        return ret;
+}
+
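+/* Issue an X_CLOSE request for the target and wait for the reply, giving
+ * the peer a chance to release the mapping before the device is torn down. */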
+static int xsegbd_mapclose(struct xsegbd_device *xsegbd_dev)
+{
+       struct xseg_request *xreq;
+       char *target;
+       xqindex blkreq_idx;
+       struct xsegbd_pending *pending;
+       struct completion comp;
+       xport p;
+       int ret = -EBUSY;
+
+       xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno,
+                       xsegbd_dev->dst_portno, X_ALLOC);
+       if (!xreq)
+               goto out;
+
+       BUG_ON(xseg_prep_request(xsegbd_dev->xseg, xreq, xsegbd_dev->targetlen, 0));
+
+       init_completion(&comp);
+       blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending, 1);
+       if (blkreq_idx == Noneidx)
+               goto out_put;
+
+       pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
+       pending->dev = xsegbd_dev;
+       pending->request = NULL;
+       pending->comp = &comp;
+
+       xreq->priv = (uint64_t) blkreq_idx;
+
+       target = xseg_get_target(xsegbd_dev->xseg, xreq);
+       strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
+       xreq->size = xreq->datalen;
+       xreq->offset = 0;
+       xreq->op = X_CLOSE;
+
+       xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
+       p = xseg_submit(xsegbd_dev->xseg, xreq, 
+                               xsegbd_dev->src_portno, X_ALLOC);
+       if (p == NoPort) {
+               XSEGLOG("couldn't submit request");
+               BUG_ON(1);
+               goto out_queue;
+       }
+       WARN_ON(xseg_signal(xsegbd_dev->xseg, p) < 0);
+       wait_for_completion_interruptible(&comp);
+       ret = 0;
+       if (xreq->state & XS_FAILED)
+               XSEGLOG("Couldn't close disk on mapper");
 
-out_data:
-       spin_lock(&xsegbd_dev->reqdatalock);
-       r = xseg_get_req_data(xsegbd_dev->xseg, xreq, &data);
-       spin_unlock(&xsegbd_dev->reqdatalock);
 out_queue:
+       pending->dev = NULL;
+       pending->comp = NULL;
        xq_append_head(&xsegbd_dev->blk_queue_pending, blkreq_idx, 1);
-       
-       goto out;
+out_put:
+       BUG_ON(xseg_put_request(xsegbd_dev->xseg, xreq, xsegbd_dev->src_portno) == -1);
+out:
+       return ret;
 }
 
-static void xseg_callback(struct xseg *xseg, xport portno)
+static void xseg_callback(xport portno)
 {
        struct xsegbd_device *xsegbd_dev;
        struct xseg_request *xreq;
        struct request *blkreq;
        struct xsegbd_pending *pending;
        unsigned long flags;
-       xqindex blkreq_idx;
+       xqindex blkreq_idx, ridx;
        int err;
-       void *data;
 
        xsegbd_dev  = __xsegbd_get_dev(portno);
        if (!xsegbd_dev) {
-               WARN_ON(3);
+               XSEGLOG("portno: %u has no xsegbd device assigned", portno);
+               WARN_ON(1);
                return;
        }
 
        for (;;) {
-               xreq = xseg_receive(xsegbd_dev->xseg, portno);
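+               /* re-arm the wait before every receive attempt, so a signal
+                * racing with an empty receive is not missed */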
+               xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
+               xreq = xseg_receive(xsegbd_dev->xseg, portno, 0);
                if (!xreq)
                        break;
 
-               spin_lock(&xsegbd_dev->reqdatalock);
-               err = xseg_get_req_data(xsegbd_dev->xseg, xreq, &data); 
-               spin_unlock(&xsegbd_dev->reqdatalock);
-               XSEGLOG("for req: %lx, got data %llu (lx %lx)", xreq, (xqindex) data, data);
-               if (err < 0) {
-                       WARN_ON(2);
-                       //maybe put request?
-                       continue;
-               }
+//             xseg_cancel_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
 
-               blkreq_idx = (xqindex) data;
+               blkreq_idx = (xqindex) xreq->priv;
                if (blkreq_idx >= xsegbd_dev->nr_requests) {
                        WARN_ON(1);
-                       //maybe put request?
+                       //FIXME maybe put request?
                        continue;
                }
 
@@ -575,31 +663,47 @@ static void xseg_callback(struct xseg *xseg, xport portno)
                /* this is now treated as a block I/O request to end */
                blkreq = pending->request;
                pending->request = NULL;
-               //xsegbd_dev = pending->dev;
-               BUG_ON(xsegbd_dev != pending->dev);
+               if (xsegbd_dev != pending->dev) {
+                       //FIXME maybe put request?
+                       XSEGLOG("xsegbd_dev != pending->dev");
+                       WARN_ON(1);
+                       continue;
+               }
                pending->dev = NULL;
-               WARN_ON(!blkreq);
+               if (!blkreq){
+                       //FIXME maybe put request?
+                       XSEGLOG("blkreq does not exist");
+                       WARN_ON(1);
+                       continue;
+               }
 
+               err = -EIO;
                if (!(xreq->state & XS_SERVED))
                        goto blk_end;
 
                if (xreq->serviced != blk_rq_bytes(blkreq))
                        goto blk_end;
 
-               /* unlock for data transfer? */
+               err = 0;
                if (!rq_data_dir(blkreq)){
                        xseg_to_blk(xsegbd_dev->xseg, xreq, blkreq);
-                       XSEGLOG("for req: %lx, completed xseg_to_blk", xreq);
-               }       
-
-               err = 0;
+               }
 blk_end:
                blk_end_request_all(blkreq, err);
-               XSEGLOG("for req: %lx, completed", xreq);
-               xq_append_head(&xsegbd_dev->blk_queue_pending, blkreq_idx, 1);
-               BUG_ON(xseg_put_request(xsegbd_dev->xseg, xreq, xsegbd_dev->src_portno) < 0);
-       }
 
+               ridx = xq_append_head(&xsegbd_dev->blk_queue_pending, 
+                                       blkreq_idx, xsegbd_dev->src_portno);
+               if (ridx == Noneidx) {
+                       XSEGLOG("couldn't append blkreq_idx");
+                       WARN_ON(1);
+               }
+
+               if (xseg_put_request(xsegbd_dev->xseg, xreq, 
+                                               xsegbd_dev->src_portno) < 0){
+                       XSEGLOG("couldn't put req");
+                       BUG_ON(1);
+               }
+       }
        if (xsegbd_dev) {
                spin_lock_irqsave(&xsegbd_dev->rqlock, flags);
                xseg_request_fn(xsegbd_dev->blk_queue);
@@ -693,6 +797,46 @@ out:
        return ret;
 }
 
+//FIXME
+//maybe try the callback first, and then do a more invasive cleanup
+static ssize_t xsegbd_cleanup(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf,
+                                       size_t size)
+{
+       struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
+       int ret = size, i;
+       struct request *blkreq = NULL;
+       struct xsegbd_pending *pending = NULL;
+       struct completion *comp = NULL;
+
+       mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
+       xlock_acquire(&xsegbd_dev->blk_queue_pending.lock, 
+                               xsegbd_dev->src_portno);
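+       /* walk every request slot; a slot missing from blk_queue_pending is
+        * assumed to be in flight, so fail its block request (or wake its
+        * waiter) and then put the slot back on the free queue */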
+       for (i = 0; i < xsegbd_dev->nr_requests; i++) {
+               if (!__xq_check(&xsegbd_dev->blk_queue_pending, i)) {
+                       pending = &xsegbd_dev->blk_req_pending[i];
+                       blkreq = pending->request;
+                       pending->request = NULL;
+                       comp = pending->comp;
+                       pending->comp = NULL;
+                       if (blkreq){
+                               XSEGLOG("Cleaning up blkreq %lx [%d]", (unsigned long) blkreq, i);
+                               blk_end_request_all(blkreq, -EIO);
+                       }
+                       if (comp){
+                               XSEGLOG("Cleaning up comp %lx [%d]", (unsigned long) comp, i);
+                               complete(comp);
+                       }
+                       __xq_append_tail(&xsegbd_dev->blk_queue_pending, i);
+               }
+       }
+       xlock_release(&xsegbd_dev->blk_queue_pending.lock);
+
+       mutex_unlock(&xsegbd_mutex);
+       return ret;
+}
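+
+/* Usage sketch (sysfs path assumed; the exact location depends on how the
+ * xsegbd bus registers its devices):
+ *     echo 1 > /sys/bus/xsegbd/devices/<device>/cleanup
+ */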
+
 static DEVICE_ATTR(size, S_IRUGO, xsegbd_size_show, NULL);
 static DEVICE_ATTR(major, S_IRUGO, xsegbd_major_show, NULL);
 static DEVICE_ATTR(srcport, S_IRUGO, xsegbd_srcport_show, NULL);
@@ -701,6 +845,7 @@ static DEVICE_ATTR(id , S_IRUGO, xsegbd_id_show, NULL);
 static DEVICE_ATTR(reqs , S_IRUGO, xsegbd_reqs_show, NULL);
 static DEVICE_ATTR(target, S_IRUGO, xsegbd_target_show, NULL);
 static DEVICE_ATTR(refresh , S_IWUSR, NULL, xsegbd_image_refresh);
+static DEVICE_ATTR(cleanup , S_IWUSR, NULL, xsegbd_cleanup);
 
 static struct attribute *xsegbd_attrs[] = {
        &dev_attr_size.attr,
@@ -711,6 +856,7 @@ static struct attribute *xsegbd_attrs[] = {
        &dev_attr_reqs.attr,
        &dev_attr_target.attr,
        &dev_attr_refresh.attr,
+       &dev_attr_cleanup.attr,
        NULL
 };
 
@@ -781,7 +927,6 @@ static ssize_t xsegbd_add(struct bus_type *bus, const char *buf, size_t count)
                goto out;
 
        spin_lock_init(&xsegbd_dev->rqlock);
-       spin_lock_init(&xsegbd_dev->reqdatalock);
        INIT_LIST_HEAD(&xsegbd_dev->node);
 
        /* parse cmd */
@@ -825,7 +970,7 @@ static ssize_t xsegbd_add(struct bus_type *bus, const char *buf, size_t count)
                        xsegbd_dev->nr_requests *sizeof(struct xsegbd_pending),
                                   GFP_KERNEL);
        if (!xsegbd_dev->blk_req_pending)
-               goto out_freeq;
+               goto out_bus;
 
        
        XSEGLOG("joining segment");
@@ -835,42 +980,39 @@ static ssize_t xsegbd_add(struct bus_type *bus, const char *buf, size_t count)
                                        "segdev",
                                        xseg_callback           );
        if (!xsegbd_dev->xseg)
-               goto out_freepending;
+               goto out_bus;
        
-
-       XSEGLOG("binding to source port %u (destination %u)",
+       XSEGLOG("%s binding to source port %u (destination %u)", xsegbd_dev->target,
                        xsegbd_dev->src_portno, xsegbd_dev->dst_portno);
-       port = xseg_bind_port(xsegbd_dev->xseg, xsegbd_dev->src_portno);
+       port = xseg_bind_port(xsegbd_dev->xseg, xsegbd_dev->src_portno, NULL);
        if (!port) {
                XSEGLOG("cannot bind to port");
                ret = -EFAULT;
 
-               goto out_xseg;
+               goto out_bus;
        }
-       //FIXME rollback here
-       BUG_ON(xsegbd_dev->src_portno != xseg_portno(xsegbd_dev->xseg, port));
        
+       if (xsegbd_dev->src_portno != xseg_portno(xsegbd_dev->xseg, port)) {
+               XSEGLOG("portno != xsegbd_dev->src_portno");
+               BUG_ON(1);
+               ret = -EFAULT;
+               goto out_bus;
+       }
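+       /* set up local (in-kernel) signal delivery on the bound port;
+        * paired with xseg_quit_local_signal() in dev_release */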
+       xseg_init_local_signal(xsegbd_dev->xseg, xsegbd_dev->src_portno);
+
        /* make sure we don't get any requests until we're ready to handle them */
        xseg_cancel_wait(xsegbd_dev->xseg, xseg_portno(xsegbd_dev->xseg, port));
 
        ret = xsegbd_dev_init(xsegbd_dev);
        if (ret)
-               goto out_xseg;
+               goto out_bus;
 
+       xseg_prepare_wait(xsegbd_dev->xseg, xseg_portno(xsegbd_dev->xseg, port));
        return count;
 
-out_xseg:
-       xseg_leave(xsegbd_dev->xseg);
-       
-out_freepending:
-       kfree(xsegbd_dev->blk_req_pending);
-
-out_freeq:
-       xq_free(&xsegbd_dev->blk_queue_pending);
-
 out_bus:
        xsegbd_bus_del_dev(xsegbd_dev);
-
        return ret;
 
 out_blkdev: