X-Git-Url: https://code.grnet.gr/git/archipelago/blobdiff_plain/989ad27ba6db9287884736a2613ae1b69f270d14..2d8188235729e9cac09c9d003cd4aa1200ab6c80:/xseg/peers/kernel/xsegbd.c

diff --git a/xseg/peers/kernel/xsegbd.c b/xseg/peers/kernel/xsegbd.c
index c511496..38aa85f 100644
--- a/xseg/peers/kernel/xsegbd.c
+++ b/xseg/peers/kernel/xsegbd.c
@@ -1,3 +1,22 @@
+/*
+ * Copyright (C) 2012 GRNET S.A.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
 /* xsegbd.c
  *
  */
@@ -18,14 +37,14 @@ #include
 #include
 #include
-
+#include
 #include
 #include "xsegbd.h"
+#include
 
 #define XSEGBD_MINORS 1
 
 /* define max request size to be used in xsegbd */
-//FIXME should we make this 4MB instead of 256KB ?
-#define XSEGBD_MAX_REQUEST_SIZE 262144U
+#define XSEGBD_MAX_REQUEST_SIZE 4194304U
 
 MODULE_DESCRIPTION("xsegbd");
 MODULE_AUTHOR("XSEG");
@@ -34,13 +53,16 @@ MODULE_LICENSE("GPL");
 static long sector_size = 0;
 static long blksize = 512;
 static int major = 0;
-static int max_dev = 1024;
+static int max_dev = 200;
+static long start_portno = 0;
+static long end_portno = 199;
 static char name[XSEGBD_SEGMENT_NAMELEN] = "xsegbd";
-static char spec[256] = "segdev:xsegbd:4:1024:12";
+static char spec[256] = "segdev:xsegbd:512:1024:12";
 
 module_param(sector_size, long, 0644);
 module_param(blksize, long, 0644);
-module_param(max_dev, int, 0644);
+module_param(start_portno, long, 0644);
+module_param(end_portno, long, 0644);
 module_param(major, int, 0644);
 module_param_string(name, name, sizeof(name), 0644);
 module_param_string(spec, spec, sizeof(spec), 0644);
@@ -51,8 +73,7 @@ static DEFINE_MUTEX(xsegbd_mutex);
 
 static DEFINE_SPINLOCK(xsegbd_devices_lock);
 
-
-static struct xsegbd_device *__xsegbd_get_dev(unsigned long id)
+struct xsegbd_device *__xsegbd_get_dev(unsigned long id)
 {
         struct xsegbd_device *xsegbd_dev = NULL;
 
@@ -63,6 +84,11 @@ static struct xsegbd_device *__xsegbd_get_dev(unsigned long id)
         return xsegbd_dev;
 }
 
+static int src_portno_to_id(xport src_portno)
+{
+        return (src_portno - start_portno);
+}
+
 /* ************************* */
 /* ***** sysfs helpers ***** */
 /* ************************* */
@@ -185,6 +211,7 @@ static const struct block_device_operations xsegbd_ops = {
 
 static void xseg_request_fn(struct request_queue *rq);
 static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev);
+static int xsegbd_mapclose(struct xsegbd_device *xsegbd_dev);
 
 static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
 {
@@ -196,14 +223,11 @@ static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
 
         xsegbd_dev->xsegbd = &xsegbd;
 
-        xsegbd_dev->blk_queue = blk_alloc_queue(GFP_KERNEL);
+        /* allocates and initializes queue */
+        xsegbd_dev->blk_queue = blk_init_queue(xseg_request_fn, &xsegbd_dev->rqlock);
         if (!xsegbd_dev->blk_queue)
                 goto out;
 
-        if (!blk_init_allocated_queue(xsegbd_dev->blk_queue,
-                        xseg_request_fn, &xsegbd_dev->rqlock))
-                goto outqueue;
-
         xsegbd_dev->blk_queue->queuedata = xsegbd_dev;
 
         blk_queue_flush(xsegbd_dev->blk_queue, REQ_FLUSH | REQ_FUA);
@@ -211,10 +235,11 @@ static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
         blk_queue_physical_block_size(xsegbd_dev->blk_queue, blksize);
         blk_queue_bounce_limit(xsegbd_dev->blk_queue, BLK_BOUNCE_ANY);
-        //blk_queue_max_segments(dev->blk_queue, 512);
 
         max_request_size_bytes = XSEGBD_MAX_REQUEST_SIZE;
         blk_queue_max_hw_sectors(xsegbd_dev->blk_queue, max_request_size_bytes >> 9);
+//      blk_queue_max_sectors(xsegbd_dev->blk_queue, max_request_size_bytes >> 10);
+        blk_queue_max_segments(xsegbd_dev->blk_queue, 1024);
         blk_queue_max_segment_size(xsegbd_dev->blk_queue, max_request_size_bytes);
         blk_queue_io_min(xsegbd_dev->blk_queue, max_request_size_bytes);
         blk_queue_io_opt(xsegbd_dev->blk_queue, max_request_size_bytes);
@@ -222,12 +247,12 @@ static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xsegbd_dev->blk_queue);
 
         /* vkoukis says we don't need partitions */
-        xsegbd_dev->gd = disk = alloc_disk(1);
+        xsegbd_dev->gd = disk = alloc_disk(XSEGBD_MINORS);
         if (!disk)
-                goto outqueue;
+                goto out;
 
         disk->major = xsegbd_dev->major;
-        disk->first_minor = 0; // id * XSEGBD_MINORS;
+        disk->first_minor = xsegbd_dev->id * XSEGBD_MINORS;
         disk->fops = &xsegbd_ops;
         disk->queue = xsegbd_dev->blk_queue;
         disk->private_data = xsegbd_dev;
@@ -235,65 +260,67 @@ static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
         snprintf(disk->disk_name, 32, "xsegbd%u", xsegbd_dev->id);
 
         ret = 0;
-
+        /* allow a non-zero sector_size parameter to override the disk size */
         if (sector_size)
                 xsegbd_dev->sectors = sector_size;
         else {
                 ret = xsegbd_get_size(xsegbd_dev);
                 if (ret)
-                        goto outdisk;
+                        goto out;
         }
 
         set_capacity(disk, xsegbd_dev->sectors);
         XSEGLOG("xsegbd active...");
         add_disk(disk); /* immediately activates the device */
 
-        return 0;
-
-
-outdisk:
-        put_disk(xsegbd_dev->gd);
-outqueue:
-        blk_cleanup_queue(xsegbd_dev->blk_queue);
 out:
-        xsegbd_dev->gd = NULL;
+        /* on error, everything is cleaned up in xsegbd_dev_release */
         return ret;
 }
 
 static void xsegbd_dev_release(struct device *dev)
 {
         struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
-
-        xseg_cancel_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
+        /* cleanup gendisk and blk_queue the right way */
         if (xsegbd_dev->gd) {
                 if (xsegbd_dev->gd->flags & GENHD_FL_UP)
                         del_gendisk(xsegbd_dev->gd);
+                xsegbd_mapclose(xsegbd_dev);
+        }
+
+        spin_lock(&xsegbd_devices_lock);
+        BUG_ON(xsegbd_devices[xsegbd_dev->id] != xsegbd_dev);
+        xsegbd_devices[xsegbd_dev->id] = NULL;
+        spin_unlock(&xsegbd_devices_lock);
+
+        XSEGLOG("releasing id: %d", xsegbd_dev->id);
+//      xseg_cancel_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
+        xseg_quit_local_signal(xsegbd_dev->xseg, xsegbd_dev->src_portno);
+
+        if (xsegbd_dev->blk_queue)
                 blk_cleanup_queue(xsegbd_dev->blk_queue);
+        if (xsegbd_dev->gd)
                 put_disk(xsegbd_dev->gd);
-        }
-//      if (xseg_free_requests(xsegbd_dev->xseg,
+//      if (xseg_free_requests(xsegbd_dev->xseg,
 //              xsegbd_dev->src_portno, xsegbd_dev->nr_requests) < 0)
 //              XSEGLOG("Error trying to free requests!\n");
+        if (xsegbd_dev->xseg){
+                xseg_leave(xsegbd_dev->xseg);
+                xsegbd_dev->xseg = NULL;
+        }
 
-        unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
-
-        spin_lock(&xsegbd_devices_lock);
-        BUG_ON(xsegbd_devices[xsegbd_dev->src_portno] != xsegbd_dev);
-        xsegbd_devices[xsegbd_dev->src_portno] = NULL;
-        spin_unlock(&xsegbd_devices_lock);
-
-        if (xsegbd_dev->blk_req_pending)
+        if (xsegbd_dev->blk_req_pending){
                 kfree(xsegbd_dev->blk_req_pending);
+                xsegbd_dev->blk_req_pending = NULL;
+        }
         xq_free(&xsegbd_dev->blk_queue_pending);
-
         kfree(xsegbd_dev);
-
         module_put(THIS_MODULE);
 }
 
@@ -342,8 +369,18 @@ static void xseg_request_fn(struct request_queue *rq)
         uint64_t datalen;
         xport p;
         int r;
+        unsigned long flags;
 
+        spin_unlock_irq(&xsegbd_dev->rqlock);
         for (;;) {
+                if (current_thread_info()->preempt_count || irqs_disabled()){
+                        XSEGLOG("Current thread preempt_count: %d, irqs_disabled(): %lu ",
+                                        current_thread_info()->preempt_count, irqs_disabled());
+                }
+                //XSEGLOG("Priority: %d", current_thread_info()->task->prio);
+                //XSEGLOG("Static priority: %d", current_thread_info()->task->static_prio);
+                //XSEGLOG("Normal priority: %d", current_thread_info()->task->normal_prio);
+                //XSEGLOG("Rt_priority: %u", current_thread_info()->task->rt_priority);
                 blkreq_idx = Noneidx;
                 xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno,
                                 xsegbd_dev->dst_portno, X_ALLOC);
@@ -354,38 +391,48 @@ static void xseg_request_fn(struct request_queue *rq)
                                 xsegbd_dev->src_portno);
                 if (blkreq_idx == Noneidx)
                         break;
-
+
                 if (blkreq_idx >= xsegbd_dev->nr_requests) {
                         XSEGLOG("blkreq_idx >= xsegbd_dev->nr_requests");
-                        BUG_ON(1);
+                        WARN_ON(1);
                         break;
                 }
+
+                spin_lock_irqsave(&xsegbd_dev->rqlock, flags);
                 blkreq = blk_fetch_request(rq);
-                if (!blkreq)
+                if (!blkreq){
+                        spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
                         break;
+                }
 
                 if (blkreq->cmd_type != REQ_TYPE_FS) {
-                        //we lose xreq here
+                        //FIXME we lose xreq here
                         XSEGLOG("non-fs cmd_type: %u. *shrug*", blkreq->cmd_type);
                         __blk_end_request_all(blkreq, 0);
+                        spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
                         continue;
                 }
+                spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
+                if (current_thread_info()->preempt_count || irqs_disabled()){
+                        XSEGLOG("Current thread preempt_count: %d, irqs_disabled(): %lu ",
                                        current_thread_info()->preempt_count, irqs_disabled());
+                }
 
                 datalen = blk_rq_bytes(blkreq);
                 r = xseg_prep_request(xsegbd_dev->xseg, xreq,
                                 xsegbd_dev->targetlen, datalen);
                 if (r < 0) {
                         XSEGLOG("couldn't prep request");
-                        __blk_end_request_err(blkreq, r);
-                        BUG_ON(1);
+                        blk_end_request_err(blkreq, r);
+                        WARN_ON(1);
                         break;
                 }
                 r = -ENOMEM;
                 if (xreq->bufferlen - xsegbd_dev->targetlen < datalen){
                         XSEGLOG("malformed req buffers");
-                        __blk_end_request_err(blkreq, r);
-                        BUG_ON(1);
+                        blk_end_request_err(blkreq, r);
+                        WARN_ON(1);
                         break;
                 }
@@ -396,7 +443,7 @@ static void xseg_request_fn(struct request_queue *rq)
                 pending->dev = xsegbd_dev;
                 pending->request = blkreq;
                 pending->comp = NULL;
-
+
                 xreq->size = datalen;
                 xreq->offset = blk_rq_pos(blkreq) << 9;
                 xreq->priv = (uint64_t) blkreq_idx;
@@ -416,7 +463,6 @@ static void xseg_request_fn(struct request_queue *rq)
                         xreq->flags |= XF_FUA;
 
                 if (rq_data_dir(blkreq)) {
-                        /* unlock for data transfers? */
                         blk_to_xseg(xsegbd_dev->xseg, xreq, blkreq);
                         xreq->op = X_WRITE;
                 } else {
@@ -424,23 +470,25 @@ static void xseg_request_fn(struct request_queue *rq)
                 }
 
+//              XSEGLOG("%s : %lu (%lu)", xsegbd_dev->target, xreq->offset, xreq->datalen);
                 r = -EIO;
                 p = xseg_submit(xsegbd_dev->xseg, xreq,
                                 xsegbd_dev->src_portno, X_ALLOC);
                 if (p == NoPort) {
                         XSEGLOG("coundn't submit req");
-                        BUG_ON(1);
-                        __blk_end_request_err(blkreq, r);
+                        WARN_ON(1);
+                        blk_end_request_err(blkreq, r);
                         break;
                 }
                 WARN_ON(xseg_signal(xsegbd_dev->xsegbd->xseg, p) < 0);
         }
         if (xreq)
-                BUG_ON(xseg_put_request(xsegbd_dev->xsegbd->xseg, xreq,
+                WARN_ON(xseg_put_request(xsegbd_dev->xsegbd->xseg, xreq,
                                         xsegbd_dev->src_portno) == -1);
 
         if (blkreq_idx != Noneidx)
-                BUG_ON(xq_append_head(&xsegbd_dev->blk_queue_pending,
+                WARN_ON(xq_append_head(&xsegbd_dev->blk_queue_pending,
                                 blkreq_idx, xsegbd_dev->src_portno) == Noneidx);
+        spin_lock_irq(&xsegbd_dev->rqlock);
 }
 
 int update_dev_sectors_from_request(    struct xsegbd_device *xsegbd_dev,
@@ -463,6 +511,10 @@ int update_dev_sectors_from_request(    struct xsegbd_device *xsegbd_dev,
                 XSEGLOG("Invalid req data");
                 return -EIO;
         }
+        if (!xsegbd_dev) {
+                XSEGLOG("Invalid xsegbd_dev");
+                return -ENOENT;
+        }
         xsegbd_dev->sectors = *((uint64_t *) data) / 512ULL;
         return 0;
 }
@@ -471,65 +523,122 @@ static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev)
 {
         struct xseg_request *xreq;
         char *target;
-        uint64_t datalen;
         xqindex blkreq_idx;
         struct xsegbd_pending *pending;
         struct completion comp;
         xport p;
-        void *data;
-        int ret = -EBUSY, r;
+        int ret = -EBUSY;
+
         xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno,
                         xsegbd_dev->dst_portno, X_ALLOC);
         if (!xreq)
                 goto out;
 
-        datalen = sizeof(uint64_t);
-        BUG_ON(xseg_prep_request(xsegbd_dev->xseg, xreq, xsegbd_dev->targetlen, datalen));
-        BUG_ON(xreq->bufferlen - xsegbd_dev->targetlen < datalen);
+        WARN_ON(xseg_prep_request(xsegbd_dev->xseg, xreq, xsegbd_dev->targetlen,
+                                sizeof(struct xseg_reply_info)));
 
         init_completion(&comp);
         blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending, 1);
         if (blkreq_idx == Noneidx)
-                goto out;
-
+                goto out_put;
+
         pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
         pending->dev = xsegbd_dev;
         pending->request = NULL;
         pending->comp = &comp;
-
+
         xreq->priv = (uint64_t) blkreq_idx;
 
         target = xseg_get_target(xsegbd_dev->xseg, xreq);
         strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
-        xreq->size = datalen;
+        xreq->size = xreq->datalen;
         xreq->offset = 0;
         xreq->op = X_INFO;
 
         xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
-        p = xseg_submit(xsegbd_dev->xseg, xreq,
+        p = xseg_submit(xsegbd_dev->xseg, xreq,
                         xsegbd_dev->src_portno, X_ALLOC);
         if ( p == NoPort) {
                 XSEGLOG("couldn't submit request");
-                BUG_ON(1);
+                WARN_ON(1);
                 goto out_queue;
         }
         WARN_ON(xseg_signal(xsegbd_dev->xseg, p) < 0);
-        XSEGLOG("Before wait for completion, xreq %lx", (unsigned long) xreq);
+        XSEGLOG("Before wait for completion, comp %lx [%llu]", (unsigned long) pending->comp, (unsigned long long) blkreq_idx);
         wait_for_completion_interruptible(&comp);
-        XSEGLOG("Woken up after wait_for_completion_interruptible(), xreq: %lx", (unsigned long) xreq);
+        XSEGLOG("Woken up after wait_for_completion_interruptible(), comp: %lx [%llu]", (unsigned long) pending->comp, (unsigned long long) blkreq_idx);
         ret = update_dev_sectors_from_request(xsegbd_dev, xreq);
-        //XSEGLOG("get_size: sectors = %ld\n", (long)xsegbd_dev->sectors);
+        XSEGLOG("get_size: sectors = %ld\n", (long)xsegbd_dev->sectors);
+
+out_queue:
+        pending->dev = NULL;
+        pending->comp = NULL;
+        xq_append_head(&xsegbd_dev->blk_queue_pending, blkreq_idx, 1);
+out_put:
+        WARN_ON(xseg_put_request(xsegbd_dev->xseg, xreq, xsegbd_dev->src_portno) == -1);
 out:
-        BUG_ON(xseg_put_request(xsegbd_dev->xseg, xreq, xsegbd_dev->src_portno) == -1);
         return ret;
+}
+
+static int xsegbd_mapclose(struct xsegbd_device *xsegbd_dev)
+{
+        struct xseg_request *xreq;
+        char *target;
+        xqindex blkreq_idx;
+        struct xsegbd_pending *pending;
+        struct completion comp;
+        xport p;
+        int ret = -EBUSY;
+
+        xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno,
+                        xsegbd_dev->dst_portno, X_ALLOC);
+        if (!xreq)
+                goto out;
+
+        WARN_ON(xseg_prep_request(xsegbd_dev->xseg, xreq, xsegbd_dev->targetlen, 0));
+
+        init_completion(&comp);
+        blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending, 1);
+        if (blkreq_idx == Noneidx)
+                goto out_put;
+
+        pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
+        pending->dev = xsegbd_dev;
+        pending->request = NULL;
+        pending->comp = &comp;
+
+
+        xreq->priv = (uint64_t) blkreq_idx;
+
+        target = xseg_get_target(xsegbd_dev->xseg, xreq);
+        strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
+        xreq->size = xreq->datalen;
+        xreq->offset = 0;
+        xreq->op = X_CLOSE;
+
+        xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
+        p = xseg_submit(xsegbd_dev->xseg, xreq,
+                        xsegbd_dev->src_portno, X_ALLOC);
+        if ( p == NoPort) {
+                XSEGLOG("couldn't submit request");
+                WARN_ON(1);
+                goto out_queue;
+        }
+        WARN_ON(xseg_signal(xsegbd_dev->xseg, p) < 0);
+        wait_for_completion_interruptible(&comp);
+        ret = 0;
+        if (xreq->state & XS_FAILED)
+                XSEGLOG("Couldn't close disk on mapper");
 
 out_queue:
         pending->dev = NULL;
         pending->comp = NULL;
         xq_append_head(&xsegbd_dev->blk_queue_pending, blkreq_idx, 1);
-
-        goto out;
+out_put:
+        WARN_ON(xseg_put_request(xsegbd_dev->xseg, xreq, xsegbd_dev->src_portno) == -1);
+out:
+        return ret;
 }
 
 static void xseg_callback(xport portno)
@@ -541,7 +650,6 @@ static void xseg_callback(xport portno)
         unsigned long flags;
         xqindex blkreq_idx, ridx;
         int err;
-        void *data;
 
         xsegbd_dev = __xsegbd_get_dev(portno);
         if (!xsegbd_dev) {
@@ -552,11 +660,11 @@ static void xseg_callback(xport portno)
 
         for (;;) {
                 xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
-                xreq = xseg_receive(xsegbd_dev->xseg, portno);
+                xreq = xseg_receive(xsegbd_dev->xseg, portno, 0);
                 if (!xreq)
                         break;
 
-                xseg_cancel_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
+//              xseg_cancel_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
 
                 blkreq_idx = (xqindex) xreq->priv;
                 if (blkreq_idx >= xsegbd_dev->nr_requests) {
@@ -581,14 +689,14 @@ static void xseg_callback(xport portno)
                 if (xsegbd_dev != pending->dev) {
                         //FIXME maybe put request?
                         XSEGLOG("xsegbd_dev != pending->dev");
-                        BUG_ON(1);
+                        WARN_ON(1);
                         continue;
                 }
                 pending->dev = NULL;
                 if (!blkreq){
                         //FIXME maybe put request?
                         XSEGLOG("blkreq does not exist");
-                        BUG_ON(1);
+                        WARN_ON(1);
                         continue;
                 }
@@ -600,13 +708,12 @@ static void xseg_callback(xport portno)
                         goto blk_end;
 
                 err = 0;
-                /* unlock for data transfer? */
                 if (!rq_data_dir(blkreq)){
                         xseg_to_blk(xsegbd_dev->xseg, xreq, blkreq);
-                }
+                }
 blk_end:
                 blk_end_request_all(blkreq, err);
-
+
                 ridx = xq_append_head(&xsegbd_dev->blk_queue_pending,
                                 blkreq_idx, xsegbd_dev->src_portno);
                 if (ridx == Noneidx) {
@@ -617,10 +724,9 @@ blk_end:
                 if (xseg_put_request(xsegbd_dev->xseg, xreq,
                                         xsegbd_dev->src_portno) < 0){
                         XSEGLOG("couldn't put req");
-                        BUG_ON(1);
+                        WARN_ON(1);
                 }
         }
-
         if (xsegbd_dev) {
                 spin_lock_irqsave(&xsegbd_dev->rqlock, flags);
                 xseg_request_fn(xsegbd_dev->blk_queue);
@@ -714,6 +820,8 @@ out:
         return ret;
 }
 
+//FIXME
+//maybe try callback, first and then do a more invasive cleanup
 static ssize_t xsegbd_cleanup(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf,
@@ -726,23 +834,27 @@ static ssize_t xsegbd_cleanup(struct device *dev,
         struct completion *comp = NULL;
 
         mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
-        for (i = 0; i < xsegbd_dev->nr_requests; i++) {
-                xlock_acquire(&xsegbd_dev->blk_queue_pending.lock,
+        xlock_acquire(&xsegbd_dev->blk_queue_pending.lock,
                                 xsegbd_dev->src_portno);
+        for (i = 0; i < xsegbd_dev->nr_requests; i++) {
                 if (!__xq_check(&xsegbd_dev->blk_queue_pending, i)) {
                         pending = &xsegbd_dev->blk_req_pending[i];
                         blkreq = pending->request;
                         pending->request = NULL;
                         comp = pending->comp;
                         pending->comp = NULL;
-                        if (blkreq)
+                        if (blkreq){
+                                XSEGLOG("Cleaning up blkreq %lx [%d]", (unsigned long) blkreq, i);
                                 blk_end_request_all(blkreq, -EIO);
-                        if (comp)
+                        }
+                        if (comp){
+                                XSEGLOG("Cleaning up comp %lx [%d]", (unsigned long) comp, i);
                                 complete(comp);
+                        }
                         __xq_append_tail(&xsegbd_dev->blk_queue_pending, i);
                 }
-                xlock_release(&xsegbd_dev->blk_queue_pending.lock);
         }
+        xlock_release(&xsegbd_dev->blk_queue_pending.lock);
         mutex_unlock(&xsegbd_mutex);
 
         return ret;
@@ -849,30 +961,28 @@ static ssize_t xsegbd_add(struct bus_type *bus, const char *buf, size_t count)
         }
         xsegbd_dev->targetlen = strlen(xsegbd_dev->target);
 
+        if (xsegbd_dev->src_portno < start_portno || xsegbd_dev->src_portno > end_portno){
+                XSEGLOG("Invadid portno");
+                ret = -EINVAL;
+                goto out_dev;
+        }
+        xsegbd_dev->id = src_portno_to_id(xsegbd_dev->src_portno);
+
         spin_lock(&xsegbd_devices_lock);
-        if (xsegbd_devices[xsegbd_dev->src_portno] != NULL) {
+        if (xsegbd_devices[xsegbd_dev->id] != NULL) {
                 ret = -EINVAL;
                 goto out_unlock;
         }
-        xsegbd_devices[xsegbd_dev->src_portno] = xsegbd_dev;
-        xsegbd_dev->id = xsegbd_dev->src_portno;
+        xsegbd_devices[xsegbd_dev->id] = xsegbd_dev;
         spin_unlock(&xsegbd_devices_lock);
 
-        XSEGLOG("registering block device major %d", major);
-        ret = register_blkdev(major, XSEGBD_NAME);
-        if (ret < 0) {
-                XSEGLOG("cannot register block device!");
-                ret = -EBUSY;
-                goto out_delentry;
-        }
-        xsegbd_dev->major = ret;
-        XSEGLOG("registered block device major %d", xsegbd_dev->major);
+        xsegbd_dev->major = major;
 
         ret = xsegbd_bus_add_dev(xsegbd_dev);
         if (ret)
-                goto out_blkdev;
+                goto out_delentry;
 
-        if (!xq_alloc_seq(&xsegbd_dev->blk_queue_pending,
+        if (!xq_alloc_seq(&xsegbd_dev->blk_queue_pending,
                         xsegbd_dev->nr_requests,
                         xsegbd_dev->nr_requests))
                 goto out_bus;
@@ -881,9 +991,9 @@ static ssize_t xsegbd_add(struct bus_type *bus, const char *buf, size_t count)
                         xsegbd_dev->nr_requests *sizeof(struct xsegbd_pending),
                                    GFP_KERNEL);
         if (!xsegbd_dev->blk_req_pending)
-                goto out_freeq;
+                goto out_bus;
+
 
-        XSEGLOG("joining segment");
         //FIXME use xsebd module config for now
         xsegbd_dev->xseg = xseg_join(   xsegbd.config.type,
@@ -891,55 +1001,44 @@ static ssize_t xsegbd_add(struct bus_type *bus, const char *buf, size_t count)
                                         "segdev",
                                         xseg_callback           );
         if (!xsegbd_dev->xseg)
-                goto out_freepending;
-
+                goto out_bus;
 
-        XSEGLOG("binding to source port %u (destination %u)",
+        XSEGLOG("%s binding to source port %u (destination %u)", xsegbd_dev->target,
                         xsegbd_dev->src_portno, xsegbd_dev->dst_portno);
-        port = xseg_bind_port(xsegbd_dev->xseg, xsegbd_dev->src_portno);
+        port = xseg_bind_port(xsegbd_dev->xseg, xsegbd_dev->src_portno, NULL);
         if (!port) {
                 XSEGLOG("cannot bind to port");
                 ret = -EFAULT;
-                goto out_xseg;
+                goto out_bus;
         }
 
         if (xsegbd_dev->src_portno != xseg_portno(xsegbd_dev->xseg, port)) {
                 XSEGLOG("portno != xsegbd_dev->src_portno");
-                BUG_ON(1);
+                WARN_ON(1);
                 ret = -EFAULT;
-                goto out_xseg;
+                goto out_bus;
         }
-
+        xseg_init_local_signal(xsegbd_dev->xseg, xsegbd_dev->src_portno);
+
+        /* make sure we don't get any requests until we're ready to handle them */
         xseg_cancel_wait(xsegbd_dev->xseg, xseg_portno(xsegbd_dev->xseg, port));
 
         ret = xsegbd_dev_init(xsegbd_dev);
         if (ret)
-                goto out_xseg;
+                goto out_bus;
 
         xseg_prepare_wait(xsegbd_dev->xseg, xseg_portno(xsegbd_dev->xseg, port));
         return count;
 
-out_xseg:
-        xseg_leave(xsegbd_dev->xseg);
-
-out_freepending:
-        kfree(xsegbd_dev->blk_req_pending);
-
-out_freeq:
-        xq_free(&xsegbd_dev->blk_queue_pending);
-
 out_bus:
         xsegbd_bus_del_dev(xsegbd_dev);
         return ret;
 
-out_blkdev:
-        unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
-
 out_delentry:
         spin_lock(&xsegbd_devices_lock);
-        xsegbd_devices[xsegbd_dev->src_portno] = NULL;
+        xsegbd_devices[xsegbd_dev->id] = NULL;
 
 out_unlock:
         spin_unlock(&xsegbd_devices_lock);
@@ -948,6 +1047,7 @@ out_dev:
         kfree(xsegbd_dev);
 
 out:
+        module_put(THIS_MODULE);
         return ret;
 }
 
@@ -1015,16 +1115,32 @@ static void xsegbd_sysfs_cleanup(void)
 static int __init xsegbd_init(void)
 {
         int ret = -ENOMEM;
+        max_dev = end_portno - start_portno;
+        if (max_dev < 0){
+                XSEGLOG("invalid port numbers");
+                ret = -EINVAL;
+                goto out;
+        }
         xsegbd_devices = kzalloc(max_dev * sizeof(struct xsegbd_devices *), GFP_KERNEL);
         if (!xsegbd_devices)
                 goto out;
 
         spin_lock_init(&xsegbd_devices_lock);
 
+        XSEGLOG("registering block device major %d", major);
+        ret = register_blkdev(major, XSEGBD_NAME);
+        if (ret < 0) {
+                XSEGLOG("cannot register block device!");
+                ret = -EBUSY;
+                goto out_free;
+        }
+        major = ret;
+        XSEGLOG("registered block device major %d", major);
+
         ret = -ENOSYS;
         ret = xsegbd_xseg_init();
         if (ret)
-                goto out_free;
+                goto out_unregister;
 
         ret = xsegbd_sysfs_init();
         if (ret)
@@ -1037,7 +1153,10 @@ out:
 
 out_xseg:
         xsegbd_xseg_quit();
-
+
+out_unregister:
+        unregister_blkdev(major, XSEGBD_NAME);
+
 out_free:
         kfree(xsegbd_devices);
 
@@ -1048,6 +1167,7 @@ static void __exit xsegbd_exit(void)
 {
         xsegbd_sysfs_cleanup();
         xsegbd_xseg_quit();
+        unregister_blkdev(major, XSEGBD_NAME);
 }
 
 module_init(xsegbd_init);
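The parameter changes above replace max_dev with a [start_portno, end_portno] window: xsegbd_add() now rejects source ports outside that window and derives the device id (and therefore the xsegbd%u disk name and first_minor) through src_portno_to_id(). A minimal, standalone C sketch of that mapping follows, for illustration only; the defaults and the example port number are assumptions mirroring the module parameters in the diff, not code from the driver:

#include <stdio.h>

/* Illustrative defaults; in the driver these come from module_param(). */
static long start_portno = 0;
static long end_portno = 199;

/* Same arithmetic as the driver's src_portno_to_id(): the device id is
 * the offset of the source port inside [start_portno, end_portno]. */
static int src_portno_to_id(long src_portno)
{
        return (int)(src_portno - start_portno);
}

int main(void)
{
        long port = 57; /* hypothetical source port of a new xsegbd device */

        if (port < start_portno || port > end_portno) {
                fprintf(stderr, "port %ld outside [%ld, %ld]\n",
                        port, start_portno, end_portno);
                return 1;
        }
        /* With the defaults above this prints id 57, i.e. disk xsegbd57. */
        printf("port %ld -> xsegbd id %d\n", port, src_portno_to_id(port));
        return 0;
}

Keeping the id relative to start_portno is what lets xsegbd_init() size xsegbd_devices[] as end_portno - start_portno instead of indexing the array by raw port number; note that a port equal to end_portno maps to id end_portno - start_portno, which is worth comparing against that allocation size.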