5 #include <linux/module.h>
6 #include <linux/moduleparam.h>
7 #include <linux/init.h>
8 #include <linux/sched.h>
9 #include <linux/kernel.h>
10 #include <linux/slab.h>
12 #include <linux/errno.h>
13 #include <linux/timer.h>
14 #include <linux/types.h>
15 #include <linux/vmalloc.h>
16 #include <linux/genhd.h>
17 #include <linux/blkdev.h>
18 #include <linux/bio.h>
19 #include <linux/device.h>
20 #include <linux/completion.h>
22 #include <sys/kernel/segdev.h>
/* One minor per gendisk: partitions are not exposed (see alloc_disk(1) below). */
25 #define XSEGBD_MINORS 1
26 /* define max request size to be used in xsegbd */
27 //FIXME should we make this 4MB instead of 256KB ?
28 #define XSEGBD_MAX_REQUEST_SIZE 262144U
30 MODULE_DESCRIPTION("xsegbd");
31 MODULE_AUTHOR("XSEG");
32 MODULE_LICENSE("GPL");
/* Module parameters: a non-zero sector_size overrides the size reported by
 * the backend (see xsegbd_dev_init); blksize is the physical block size
 * advertised to the block layer; max_dev bounds the xsegbd_devices[] table;
 * name/spec select the XSEG segment to join. */
34 static long sector_size = 0;
35 static long blksize = 512;
37 static int max_dev = 1024;
38 static char name[XSEGBD_SEGMENT_NAMELEN] = "xsegbd";
39 static char spec[256] = "segdev:xsegbd:4:1024:12";
41 module_param(sector_size, long, 0644);
42 module_param(blksize, long, 0644);
43 module_param(max_dev, int, 0644);
/* NOTE(review): 'major' is registered as a parameter here but its definition
 * is not visible in this view -- presumably declared on an elided line. */
44 module_param(major, int, 0644);
45 module_param_string(name, name, sizeof(name), 0644);
46 module_param_string(spec, spec, sizeof(spec), 0644);
48 //static spinlock_t __lock;
/* Global driver state: the single xsegbd instance, plus a per-source-port
 * device table guarded by xsegbd_devices_lock; xsegbd_mutex serializes the
 * sysfs add/remove/refresh paths. */
49 static struct xsegbd xsegbd;
50 static struct xsegbd_device **xsegbd_devices; /* indexed by portno */
51 static DEFINE_MUTEX(xsegbd_mutex);
52 static DEFINE_SPINLOCK(xsegbd_devices_lock);
/* Look up the xsegbd_device bound to source port 'id' under the devices lock.
 * NOTE(review): no bounds check on 'id' is visible -- callers must pass an
 * index < max_dev.  Return statement is on a line elided from this view. */
56 static struct xsegbd_device *__xsegbd_get_dev(unsigned long id)
58 struct xsegbd_device *xsegbd_dev = NULL;
60 spin_lock(&xsegbd_devices_lock);
61 xsegbd_dev = xsegbd_devices[id];
62 spin_unlock(&xsegbd_devices_lock);
67 /* ************************* */
68 /* ***** sysfs helpers ***** */
69 /* ************************* */
/* Map an embedded struct device back to its owning xsegbd_device. */
71 static struct xsegbd_device *dev_to_xsegbd(struct device *dev)
73 return container_of(dev, struct xsegbd_device, dev);
/* Take a reference on the embedded device (pairs with xsegbd_put_dev). */
76 static struct device *xsegbd_get_dev(struct xsegbd_device *xsegbd_dev)
79 return get_device(&xsegbd_dev->dev);
/* Drop the reference taken by xsegbd_get_dev; the final put ends up calling
 * xsegbd_dev_release via the device release callback. */
82 static void xsegbd_put_dev(struct xsegbd_device *xsegbd_dev)
84 put_device(&xsegbd_dev->dev);
87 /* ************************* */
88 /* ** XSEG Initialization ** */
89 /* ************************* */
91 static void xseg_callback(struct xseg *xseg, uint32_t portno);
/* Initialize the XSEG layer and join the shared segment described by the
 * 'spec' module parameter.  Returns 0 on success, negative on error (error
 * paths are on elided lines in this view).
 * NOTE(review): strncpy may leave xsegbd.name unterminated when 'name' fills
 * the buffer exactly -- confirm XSEGBD_SEGMENT_NAMELEN accounting. */
93 int xsegbd_xseg_init(void)
98 strncpy(xsegbd.name, name, XSEGBD_SEGMENT_NAMELEN);
100 r = xseg_initialize();
102 XSEGLOG("cannot initialize 'segdev' peer");
106 r = xseg_parse_spec(spec, &xsegbd.config);
/* Only the in-kernel 'segdev' segment type is expected; warn otherwise. */
110 if (strncmp(xsegbd.config.type, "segdev", 16))
111 XSEGLOG("WARNING: unexpected segment type '%s' vs 'segdev'",
114 /* leave it here for now */
115 XSEGLOG("joining segment");
116 xsegbd.xseg = xseg_join( xsegbd.config.type,
121 XSEGLOG("cannot find segment");
/* Tear down the XSEG attachment: release the reserved segdev and unmap the
 * shared segment before the module goes away. */
132 int xsegbd_xseg_quit(void)
134 struct segdev *segdev;
136 /* make sure to unmap the segment first */
137 segdev = segdev_get(0);
/* NOTE(review): segdev_get() return value is not checked in the visible code. */
138 clear_bit(SEGDEV_RESERVED, &segdev->flags);
139 xsegbd.xseg->priv->segment_type.ops.unmap(xsegbd.xseg, xsegbd.xseg->segment_size);
146 /* ***************************** */
147 /* ** Block Device Operations ** */
148 /* ***************************** */
/* open(): pin the device while the block device node is held open. */
150 static int xsegbd_open(struct block_device *bdev, fmode_t mode)
152 struct gendisk *disk = bdev->bd_disk;
153 struct xsegbd_device *xsegbd_dev = disk->private_data;
155 xsegbd_get_dev(xsegbd_dev);
/* release(): drop the reference taken in xsegbd_open. */
160 static int xsegbd_release(struct gendisk *gd, fmode_t mode)
162 struct xsegbd_device *xsegbd_dev = gd->private_data;
164 xsegbd_put_dev(xsegbd_dev);
/* ioctl(): no device-specific commands are implemented (body elided/empty
 * in this view). */
169 static int xsegbd_ioctl(struct block_device *bdev, fmode_t mode,
170 unsigned int cmd, unsigned long arg)
/* block_device_operations: wires the open/release/ioctl handlers above. */
175 static const struct block_device_operations xsegbd_ops = {
176 .owner = THIS_MODULE,
178 .release = xsegbd_release,
179 .ioctl = xsegbd_ioctl
183 /* *************************** */
184 /* ** Device Initialization ** */
185 /* *************************** */
187 static void xseg_request_fn(struct request_queue *rq);
188 static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev);
/* Create the request queue and gendisk for one xsegbd device, size it (from
 * the sector_size parameter or by querying the backend), and activate it.
 * Returns 0 on success; error-path lines are elided in this view. */
190 static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
193 struct gendisk *disk;
194 unsigned int max_request_size_bytes;
196 spin_lock_init(&xsegbd_dev->rqlock);
198 xsegbd_dev->xsegbd = &xsegbd;
200 xsegbd_dev->blk_queue = blk_alloc_queue(GFP_KERNEL);
201 if (!xsegbd_dev->blk_queue)
/* Old-style request_fn queue: xseg_request_fn runs with rqlock held. */
204 blk_init_allocated_queue(xsegbd_dev->blk_queue, xseg_request_fn, &xsegbd_dev->rqlock);
205 xsegbd_dev->blk_queue->queuedata = xsegbd_dev;
/* Advertise FLUSH/FUA support and block-size limits to the block layer. */
207 blk_queue_flush(xsegbd_dev->blk_queue, REQ_FLUSH | REQ_FUA);
208 blk_queue_logical_block_size(xsegbd_dev->blk_queue, 512);
209 blk_queue_physical_block_size(xsegbd_dev->blk_queue, blksize);
210 blk_queue_bounce_limit(xsegbd_dev->blk_queue, BLK_BOUNCE_ANY);
212 //blk_queue_max_segments(dev->blk_queue, 512);
/* Cap each request at XSEGBD_MAX_REQUEST_SIZE so it fits an xseg buffer. */
214 max_request_size_bytes = XSEGBD_MAX_REQUEST_SIZE;
215 blk_queue_max_hw_sectors(xsegbd_dev->blk_queue, max_request_size_bytes >> 9);
216 blk_queue_max_segment_size(xsegbd_dev->blk_queue, max_request_size_bytes);
217 blk_queue_io_min(xsegbd_dev->blk_queue, max_request_size_bytes);
218 blk_queue_io_opt(xsegbd_dev->blk_queue, max_request_size_bytes);
220 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xsegbd_dev->blk_queue);
222 /* vkoukis says we don't need partitions */
223 xsegbd_dev->gd = disk = alloc_disk(1);
225 /* FIXME: We call xsegbd_dev_release if something goes wrong, to cleanup
227 * Would it be better to do the cleanup here, and conditionally cleanup
232 disk->major = xsegbd_dev->major;
233 disk->first_minor = 0; // id * XSEGBD_MINORS;
234 disk->fops = &xsegbd_ops;
235 disk->queue = xsegbd_dev->blk_queue;
236 disk->private_data = xsegbd_dev;
237 disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
238 snprintf(disk->disk_name, 32, "xsegbd%u", xsegbd_dev->id);
242 /* allow a non-zero sector_size parameter to override the disk size */
244 xsegbd_dev->sectors = sector_size;
/* Otherwise ask the storage backend for the size (synchronous round-trip). */
246 ret = xsegbd_get_size(xsegbd_dev);
251 set_capacity(disk, xsegbd_dev->sectors);
252 XSEGLOG("xsegbd active...");
253 add_disk(disk); /* immediately activates the device */
/* Device release callback (final put_device): tear down gendisk and queue,
 * return xseg requests, unregister the major, clear the port table slot and
 * free per-device allocations.  Runs once per device lifetime. */
261 static void xsegbd_dev_release(struct device *dev)
263 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
265 /* cleanup gendisk and blk_queue the right way */
266 if (xsegbd_dev->gd) {
267 if (xsegbd_dev->gd->flags & GENHD_FL_UP)
268 del_gendisk(xsegbd_dev->gd);
270 blk_cleanup_queue(xsegbd_dev->blk_queue);
271 put_disk(xsegbd_dev->gd);
274 /* xsegbd actually does not need to use waiting.
275 * maybe we can use xseg_cancel_wait for clarity
276 * with the xseg_segdev kernel driver to convert
279 xseg_cancel_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
/* Give the pre-allocated request slots back to the segment. */
281 if (xseg_free_requests(xsegbd_dev->xseg,
282 xsegbd_dev->src_portno, xsegbd_dev->nr_requests) < 0)
283 XSEGLOG("Error trying to free requests!\n");
286 unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
/* Clear our slot in the port table; it must still point at us. */
288 spin_lock(&xsegbd_devices_lock);
289 BUG_ON(xsegbd_devices[xsegbd_dev->src_portno] != xsegbd_dev);
290 xsegbd_devices[xsegbd_dev->src_portno] = NULL;
291 spin_unlock(&xsegbd_devices_lock);
/* NOTE(review): the 'if' guard before kfree is redundant (kfree(NULL) is a
 * no-op) but kept byte-identical here. */
293 if (xsegbd_dev->blk_req_pending)
294 kfree(xsegbd_dev->blk_req_pending);
295 xq_free(&xsegbd_dev->blk_queue_pending);
/* Balance the try_module_get() taken in xsegbd_add. */
299 module_put(THIS_MODULE);
302 /* ******************* */
303 /* ** Critical Path ** */
304 /* ******************* */
/* Copy a WRITE request's payload from its bio segments into the xseg request
 * data buffer.  NOTE(review): 'off' is advanced on an elided line; passing
 * the bv_offset-adjusted pointer to kunmap_atomic relies on it masking back
 * to the page boundary -- confirm against the target kernel version. */
306 static void blk_to_xseg(struct xseg *xseg, struct xseg_request *xreq,
307 struct request *blkreq)
309 struct bio_vec *bvec;
310 struct req_iterator iter;
312 char *data = xseg_get_data(xseg, xreq);
313 rq_for_each_segment(bvec, blkreq, iter) {
314 char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
315 memcpy(data + off, bdata, bvec->bv_len);
317 kunmap_atomic(bdata);
/* Mirror of blk_to_xseg for READ completions: copy served data from the xseg
 * request buffer back into the request's bio segments.  Same kmap_atomic /
 * offset caveats as blk_to_xseg apply. */
321 static void xseg_to_blk(struct xseg *xseg, struct xseg_request *xreq,
322 struct request *blkreq)
324 struct bio_vec *bvec;
325 struct req_iterator iter;
327 char *data = xseg_get_data(xseg, xreq);
328 rq_for_each_segment(bvec, blkreq, iter) {
329 char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
330 memcpy(bdata, data + off, bvec->bv_len);
332 kunmap_atomic(bdata);
/* Block-layer request function (called with rqlock held): drain the request
 * queue, translating each struct request into an xseg request submitted to
 * the destination port.  On failure the xseg request and pending index are
 * returned (see the tail of the function); several loop/branch lines are
 * elided in this view. */
336 static void xseg_request_fn(struct request_queue *rq)
338 struct xseg_request *xreq;
339 struct xsegbd_device *xsegbd_dev = rq->queuedata;
340 struct request *blkreq;
341 struct xsegbd_pending *pending;
349 blkreq_idx = Noneidx;
/* Allocate an xseg request slot targeting the backend port. */
350 xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno,
351 xsegbd_dev->dst_portno, X_ALLOC);
/* Reserve a pending-table index to correlate the completion later. */
355 blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending,
356 xsegbd_dev->src_portno);
357 if (blkreq_idx == Noneidx)
360 blkreq = blk_fetch_request(rq);
/* Only filesystem requests are serviced; anything else is completed as-is. */
364 if (blkreq->cmd_type != REQ_TYPE_FS) {
366 XSEGLOG("non-fs cmd_type: %u. *shrug*", blkreq->cmd_type);
367 __blk_end_request_all(blkreq, 0);
371 datalen = blk_rq_bytes(blkreq);
372 r = xseg_prep_request(xsegbd_dev->xseg, xreq,
373 xsegbd_dev->targetlen, datalen);
375 XSEGLOG("couldn't prep request");
376 __blk_end_request_err(blkreq, r);
/* Sanity check: target name + payload must fit the prepared buffer. */
380 if (xreq->bufferlen - xsegbd_dev->targetlen < datalen){
381 XSEGLOG("malformed req buffers");
382 __blk_end_request_err(blkreq, r);
387 target = xseg_get_target(xsegbd_dev->xseg, xreq);
388 strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
389 if (blkreq_idx >= xsegbd_dev->nr_requests) {
390 XSEGLOG("blkreq_idx >= xsegbd_dev->nr_requests");
392 __blk_end_request_err(blkreq, -1);
/* Record the in-flight request; comp stays NULL for async block I/O
 * (only xsegbd_get_size uses the completion path). */
395 pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
396 pending->dev = xsegbd_dev;
397 pending->request = blkreq;
398 pending->comp = NULL;
399 xreq->size = datalen;
400 xreq->offset = blk_rq_pos(blkreq) << 9;
/* Diagnostic only: flag I/O beyond the advertised device size. */
402 if (xreq->offset >= (sector_size << 9))
403 XSEGLOG("sector offset: %lu > %lu, flush:%u, fua:%u",
404 blk_rq_pos(blkreq), sector_size,
405 blkreq->cmd_flags & REQ_FLUSH,
406 blkreq->cmd_flags & REQ_FUA);
/* Propagate FLUSH/FUA semantics to the xseg request. */
409 if (blkreq->cmd_flags & REQ_FLUSH)
410 xreq->flags |= XF_FLUSH;
412 if (blkreq->cmd_flags & REQ_FUA)
413 xreq->flags |= XF_FUA;
415 //XSEGLOG("xreq: %lx size: %llu offset: %llu, blkreq_idx: %llu",
416 // xreq, xreq->size, xreq->offset, blkreq_idx);
/* rq_data_dir != 0 means WRITE: stage the payload into the xseg buffer. */
418 if (rq_data_dir(blkreq)) {
419 /* unlock for data transfers? */
420 blk_to_xseg(xsegbd_dev->xseg, xreq, blkreq);
421 //XSEGLOG("xreq: %lx size: %llu offset: %llu, blkreq_idx: %llu completed blk_to_xseg",
422 // xreq, xreq->size, xreq->offset, blkreq_idx);
428 //maybe put this in loop start, and on break,
429 //just do xseg_get_req_data
/* Stash the pending index in the request so xseg_callback can find it. */
430 spin_lock(&xsegbd_dev->reqdatalock);
431 r = xseg_set_req_data(xsegbd_dev->xseg, xreq, (void *) blkreq_idx);
432 spin_unlock(&xsegbd_dev->reqdatalock);
434 //XSEGLOG("xreq: %lx size: %llu offset: %llu, blkreq_idx: %llu set req data",
435 // xreq, xreq->size, xreq->offset, blkreq_idx);
437 p = xseg_submit(xsegbd_dev->xseg, xreq,
438 xsegbd_dev->src_portno, X_ALLOC);
440 //no unsetting req data;
441 XSEGLOG("coundn't submit req");
443 __blk_end_request_err(blkreq, -1);
446 //XSEGLOG("xreq: %lx size: %llu offset: %llu, blkreq_idx: %llu submitted",
447 // xreq, xreq->size, xreq->offset, blkreq_idx);
/* Wake the peer on the port returned by xseg_submit. */
448 WARN_ON(xseg_signal(xsegbd_dev->xsegbd->xseg, p) < 0);
/* Error unwind (label lines elided): return the xseg request and recycle
 * the pending index if one was reserved. */
451 BUG_ON(xseg_put_request(xsegbd_dev->xsegbd->xseg, xreq,
452 xsegbd_dev->src_portno) == -1);
453 if (blkreq_idx != Noneidx)
454 BUG_ON(xq_append_head(&xsegbd_dev->blk_queue_pending,
455 blkreq_idx, xsegbd_dev->src_portno) == Noneidx);
/* Interpret a completed size-query request: on success the data buffer holds
 * the device size in bytes (uint64_t), converted here to 512-byte sectors.
 * Failure/unserved paths return early on elided lines. */
458 int update_dev_sectors_from_request( struct xsegbd_device *xsegbd_dev,
459 struct xseg_request *xreq )
463 if (xreq->state & XS_FAILED)
466 if (!(xreq->state & XS_SERVED))
469 data = xseg_get_data(xsegbd_dev->xseg, xreq);
470 xsegbd_dev->sectors = *((uint64_t *) data) / 512ULL;
/* Synchronously query the backend for the device size: submit an xseg
 * request with an on-stack completion in the pending slot, block until
 * xseg_callback completes it, then read the size via
 * update_dev_sectors_from_request.  Error paths are on elided lines. */
474 static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev)
476 struct xseg_request *xreq;
480 struct xsegbd_pending *pending;
481 struct completion comp;
485 xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno,
486 xsegbd_dev->dst_portno, X_ALLOC);
490 datalen = sizeof(uint64_t);
491 BUG_ON(xseg_prep_request(xsegbd_dev->xseg, xreq, xsegbd_dev->targetlen, datalen));
492 BUG_ON(xreq->bufferlen - xsegbd_dev->targetlen < datalen);
494 init_completion(&comp);
495 blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending, 1);
496 if (blkreq_idx == Noneidx)
/* comp != NULL marks this slot as a blocking (non-block-I/O) request. */
499 pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
500 pending->dev = xsegbd_dev;
501 pending->request = NULL;
502 pending->comp = &comp;
505 spin_lock(&xsegbd_dev->reqdatalock);
506 r = xseg_set_req_data(xsegbd_dev->xseg, xreq, (void *) blkreq_idx);
507 spin_unlock(&xsegbd_dev->reqdatalock);
510 //XSEGLOG("for req: %lx, set data %llu (lx: %lx)", xreq, blkreq_idx, (void *) blkreq_idx);
512 target = xseg_get_target(xsegbd_dev->xseg, xreq);
513 strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
514 xreq->size = datalen;
519 /* waiting is not needed.
520 * but it should be better to use xseg_prepare_wait
521 * and the xseg_segdev kernel driver, would be a no op
524 xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
525 p = xseg_submit(xsegbd_dev->xseg, xreq,
526 xsegbd_dev->src_portno, X_ALLOC);
531 WARN_ON(xseg_signal(xsegbd_dev->xseg, p) < 0);
/* NOTE(review): _interruptible return value is not checked in the visible
 * code; a signal could fall through before the callback completes 'comp'. */
533 wait_for_completion_interruptible(&comp);
534 //XSEGLOG("Woken up after wait_for_completion_interruptible()\n");
535 ret = update_dev_sectors_from_request(xsegbd_dev, xreq);
536 //XSEGLOG("get_size: sectors = %ld\n", (long)xsegbd_dev->sectors);
538 BUG_ON(xseg_put_request(xsegbd_dev->xseg, xreq, xsegbd_dev->src_portno) < 0);
/* Error unwind (labels elided): clear req data and recycle the index. */
542 spin_lock(&xsegbd_dev->reqdatalock);
543 r = xseg_get_req_data(xsegbd_dev->xseg, xreq, &data);
544 spin_unlock(&xsegbd_dev->reqdatalock);
546 xq_append_head(&xsegbd_dev->blk_queue_pending, blkreq_idx, 1);
/* XSEG completion callback for 'portno': receive finished requests, map each
 * back to its pending slot, then either wake a blocked waiter (get_size) or
 * end the corresponding block request; finally re-kick the request function.
 * Receive loop braces and several error branches are elided in this view. */
551 static void xseg_callback(struct xseg *xseg, xport portno)
553 struct xsegbd_device *xsegbd_dev;
554 struct xseg_request *xreq;
555 struct request *blkreq;
556 struct xsegbd_pending *pending;
558 xqindex blkreq_idx, ridx;
562 xsegbd_dev = __xsegbd_get_dev(portno);
569 xreq = xseg_receive(xsegbd_dev->xseg, portno);
/* Recover the pending-table index stored at submit time. */
573 spin_lock(&xsegbd_dev->reqdatalock);
574 err = xseg_get_req_data(xsegbd_dev->xseg, xreq, &data);
575 spin_unlock(&xsegbd_dev->reqdatalock);
576 //XSEGLOG("for req: %lx, got data %llu (lx %lx)", xreq, (xqindex) data, data);
583 blkreq_idx = (xqindex) data;
584 if (blkreq_idx >= xsegbd_dev->nr_requests) {
590 pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
592 /* someone is blocking on this request
593 and will handle it when we wake them up. */
594 complete(pending->comp);
595 /* the request is blocker's responsibility so
596 we will not put_request(); */
600 /* this is now treated as a block I/O request to end */
601 blkreq = pending->request;
602 pending->request = NULL;
603 //xsegbd_dev = pending->dev;
/* Cross-check that the completion arrived on the device that submitted it. */
604 if (xsegbd_dev != pending->dev) {
605 XSEGLOG("xsegbd_dev != pending->dev");
612 XSEGLOG("blkreq does not exist");
/* Treat unserved or short-served requests as errors (branch bodies elided). */
618 if (!(xreq->state & XS_SERVED))
621 if (xreq->serviced != blk_rq_bytes(blkreq))
624 /* unlock for data transfer? */
/* rq_data_dir == 0 means READ: copy served data back into the bios. */
625 if (!rq_data_dir(blkreq)){
626 xseg_to_blk(xsegbd_dev->xseg, xreq, blkreq);
627 //XSEGLOG("for req: %lx, completed xseg_to_blk", xreq);
632 blk_end_request_all(blkreq, err);
633 //XSEGLOG("for req: %lx, completed", xreq);
/* Recycle the pending index and return the xseg request slot. */
634 ridx = xq_append_head(&xsegbd_dev->blk_queue_pending, blkreq_idx, 1);
635 if (ridx == Noneidx) {
636 XSEGLOG("couldnt append blkreq_idx");
640 err = xseg_put_request(xsegbd_dev->xseg, xreq, xsegbd_dev->src_portno);
642 XSEGLOG("couldn't put req");
/* Requests may have been waiting for free slots: re-run the request fn
 * under the queue lock (irqsave: callback context may be atomic). */
648 spin_lock_irqsave(&xsegbd_dev->rqlock, flags);
649 xseg_request_fn(xsegbd_dev->blk_queue);
650 spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
655 /* sysfs interface */
/* Bus type for xsegbd devices; initializer members are on elided lines
 * (add/remove bus attrs are attached in xsegbd_sysfs_init). */
657 static struct bus_type xsegbd_bus_type = {
/* sysfs 'size' attribute: device size in bytes (sectors * 512). */
661 static ssize_t xsegbd_size_show(struct device *dev,
662 struct device_attribute *attr, char *buf)
664 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
666 return sprintf(buf, "%llu\n", (unsigned long long) xsegbd_dev->sectors * 512ULL);
/* sysfs 'major' attribute: the block major registered for this device. */
669 static ssize_t xsegbd_major_show(struct device *dev,
670 struct device_attribute *attr, char *buf)
672 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
674 return sprintf(buf, "%d\n", xsegbd_dev->major);
/* sysfs 'srcport' attribute: the XSEG source port this device is bound to. */
677 static ssize_t xsegbd_srcport_show(struct device *dev,
678 struct device_attribute *attr, char *buf)
680 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
682 return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->src_portno);
/* sysfs 'dstport' attribute: the XSEG destination (backend) port. */
685 static ssize_t xsegbd_dstport_show(struct device *dev,
686 struct device_attribute *attr, char *buf)
688 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
690 return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->dst_portno);
/* sysfs 'id' attribute: device id (equal to src_portno, see xsegbd_add). */
693 static ssize_t xsegbd_id_show(struct device *dev,
694 struct device_attribute *attr, char *buf)
696 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
698 return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->id);
/* sysfs 'reqs' attribute: number of pre-allocated xseg request slots. */
701 static ssize_t xsegbd_reqs_show(struct device *dev,
702 struct device_attribute *attr, char *buf)
704 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
706 return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->nr_requests);
/* sysfs 'target' attribute: the backend target (volume) name. */
709 static ssize_t xsegbd_target_show(struct device *dev,
710 struct device_attribute *attr, char *buf)
712 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
714 return sprintf(buf, "%s\n", xsegbd_dev->target);
/* sysfs 'refresh' store: re-query the backend size and update the gendisk
 * capacity; serialized against add/remove via xsegbd_mutex. */
717 static ssize_t xsegbd_image_refresh(struct device *dev,
718 struct device_attribute *attr,
722 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
725 mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
727 rc = xsegbd_get_size(xsegbd_dev);
733 set_capacity(xsegbd_dev->gd, xsegbd_dev->sectors);
736 mutex_unlock(&xsegbd_mutex);
/* Per-device sysfs attributes, grouped under the device via device_type. */
740 static DEVICE_ATTR(size, S_IRUGO, xsegbd_size_show, NULL);
741 static DEVICE_ATTR(major, S_IRUGO, xsegbd_major_show, NULL);
742 static DEVICE_ATTR(srcport, S_IRUGO, xsegbd_srcport_show, NULL);
743 static DEVICE_ATTR(dstport, S_IRUGO, xsegbd_dstport_show, NULL);
744 static DEVICE_ATTR(id , S_IRUGO, xsegbd_id_show, NULL);
745 static DEVICE_ATTR(reqs , S_IRUGO, xsegbd_reqs_show, NULL);
746 static DEVICE_ATTR(target, S_IRUGO, xsegbd_target_show, NULL);
747 static DEVICE_ATTR(refresh , S_IWUSR, NULL, xsegbd_image_refresh);
749 static struct attribute *xsegbd_attrs[] = {
751 &dev_attr_major.attr,
752 &dev_attr_srcport.attr,
753 &dev_attr_dstport.attr,
756 &dev_attr_target.attr,
757 &dev_attr_refresh.attr,
761 static struct attribute_group xsegbd_attr_group = {
762 .attrs = xsegbd_attrs,
765 static const struct attribute_group *xsegbd_attr_groups[] = {
/* Empty release: real teardown happens in xsegbd_dev_release (dev->release). */
770 static void xsegbd_sysfs_dev_release(struct device *dev)
774 static struct device_type xsegbd_device_type = {
776 .groups = xsegbd_attr_groups,
777 .release = xsegbd_sysfs_dev_release,
/* Root device that all xsegbd devices hang off in sysfs. */
780 static void xsegbd_root_dev_release(struct device *dev)
784 static struct device xsegbd_root_dev = {
785 .init_name = "xsegbd",
786 .release = xsegbd_root_dev_release,
/* Register a device on the xsegbd bus, named after its id; serialized with
 * the other sysfs paths via xsegbd_mutex. */
789 static int xsegbd_bus_add_dev(struct xsegbd_device *xsegbd_dev)
794 mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
795 dev = &xsegbd_dev->dev;
797 dev->bus = &xsegbd_bus_type;
798 dev->type = &xsegbd_device_type;
799 dev->parent = &xsegbd_root_dev;
/* Release callback does the real teardown when the last reference drops. */
800 dev->release = xsegbd_dev_release;
801 dev_set_name(dev, "%d", xsegbd_dev->id);
803 ret = device_register(dev);
805 mutex_unlock(&xsegbd_mutex);
/* Unregister the device; the final put triggers xsegbd_dev_release. */
809 static void xsegbd_bus_del_dev(struct xsegbd_device *xsegbd_dev)
811 device_unregister(&xsegbd_dev->dev);
/* Bus 'add' store: parse "target src:dst:nr_requests" from userspace, claim
 * the source-port slot, register a block major, join the segment, bind the
 * port and bring up the disk.  On failure the labeled unwind path (labels on
 * elided lines) rolls everything back in reverse order. */
814 static ssize_t xsegbd_add(struct bus_type *bus, const char *buf, size_t count)
816 struct xsegbd_device *xsegbd_dev;
817 struct xseg_port *port;
818 ssize_t ret = -ENOMEM;
/* Pin the module for the lifetime of the device (dropped in dev_release). */
820 if (!try_module_get(THIS_MODULE))
823 xsegbd_dev = kzalloc(sizeof(*xsegbd_dev), GFP_KERNEL);
827 spin_lock_init(&xsegbd_dev->rqlock);
828 spin_lock_init(&xsegbd_dev->reqdatalock);
829 INIT_LIST_HEAD(&xsegbd_dev->node);
/* nr_requests is optional (>= 3 matched fields accepted); a default is
 * presumably set on an elided line -- TODO confirm. */
832 if (sscanf(buf, "%" __stringify(XSEGBD_TARGET_NAMELEN) "s "
833 "%d:%d:%d", xsegbd_dev->target, &xsegbd_dev->src_portno,
834 &xsegbd_dev->dst_portno, &xsegbd_dev->nr_requests) < 3) {
838 xsegbd_dev->targetlen = strlen(xsegbd_dev->target);
/* One device per source port: claim the slot or bail out. */
840 spin_lock(&xsegbd_devices_lock);
841 if (xsegbd_devices[xsegbd_dev->src_portno] != NULL) {
845 xsegbd_devices[xsegbd_dev->src_portno] = xsegbd_dev;
846 xsegbd_dev->id = xsegbd_dev->src_portno;
847 spin_unlock(&xsegbd_devices_lock);
/* major == 0 asks the kernel for a dynamic major (returned in ret). */
849 XSEGLOG("registering block device major %d", major);
850 ret = register_blkdev(major, XSEGBD_NAME);
852 XSEGLOG("cannot register block device!");
856 xsegbd_dev->major = ret;
857 XSEGLOG("registered block device major %d", xsegbd_dev->major);
859 ret = xsegbd_bus_add_dev(xsegbd_dev);
/* Pre-allocate the free-index queue and pending table, one slot per
 * outstanding request. */
863 if (!xq_alloc_seq(&xsegbd_dev->blk_queue_pending,
864 xsegbd_dev->nr_requests,
865 xsegbd_dev->nr_requests))
868 xsegbd_dev->blk_req_pending = kzalloc(
869 xsegbd_dev->nr_requests *sizeof(struct xsegbd_pending),
871 if (!xsegbd_dev->blk_req_pending)
875 XSEGLOG("joining segment");
876 //FIXME use xsebd module config for now
877 xsegbd_dev->xseg = xseg_join( xsegbd.config.type,
881 if (!xsegbd_dev->xseg)
882 goto out_freepending;
885 XSEGLOG("binding to source port %u (destination %u)",
886 xsegbd_dev->src_portno, xsegbd_dev->dst_portno);
887 port = xseg_bind_port(xsegbd_dev->xseg, xsegbd_dev->src_portno);
889 XSEGLOG("cannot bind to port");
894 //FIXME rollback here
895 BUG_ON(xsegbd_dev->src_portno != xseg_portno(xsegbd_dev->xseg, port));
897 /* make sure we don't get any requests until we're ready to handle them */
898 xseg_cancel_wait(xsegbd_dev->xseg, xseg_portno(xsegbd_dev->xseg, port));
900 ret = xsegbd_dev_init(xsegbd_dev);
/* Error unwind, executed in reverse order of acquisition: */
907 xseg_leave(xsegbd_dev->xseg);
910 kfree(xsegbd_dev->blk_req_pending);
913 xq_free(&xsegbd_dev->blk_queue_pending);
916 xsegbd_bus_del_dev(xsegbd_dev);
921 unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
924 spin_lock(&xsegbd_devices_lock);
925 xsegbd_devices[xsegbd_dev->src_portno] = NULL;
928 spin_unlock(&xsegbd_devices_lock);
/* Bus 'remove' store: parse a numeric device id and unregister the matching
 * device.  NOTE(review): strict_strtoul is the pre-kstrtoul API -- fine for
 * the kernel generation this driver targets. */
937 static ssize_t xsegbd_remove(struct bus_type *bus, const char *buf, size_t count)
939 struct xsegbd_device *xsegbd_dev = NULL;
943 ret = strict_strtoul(buf, 10, &ul_id);
951 mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
954 xsegbd_dev = __xsegbd_get_dev(id);
959 xsegbd_bus_del_dev(xsegbd_dev);
962 mutex_unlock(&xsegbd_mutex);
/* Bus-level attributes: write-only 'add' and 'remove' control files. */
966 static struct bus_attribute xsegbd_bus_attrs[] = {
967 __ATTR(add, S_IWUSR, NULL, xsegbd_add),
968 __ATTR(remove, S_IWUSR, NULL, xsegbd_remove),
/* Register the root device and the xsegbd bus (with its add/remove attrs);
 * on bus registration failure the root device is unregistered again. */
972 static int xsegbd_sysfs_init(void)
976 ret = device_register(&xsegbd_root_dev);
980 xsegbd_bus_type.bus_attrs = xsegbd_bus_attrs;
981 ret = bus_register(&xsegbd_bus_type);
983 device_unregister(&xsegbd_root_dev);
/* Reverse of xsegbd_sysfs_init: bus first, then the root device. */
988 static void xsegbd_sysfs_cleanup(void)
990 bus_unregister(&xsegbd_bus_type);
991 device_unregister(&xsegbd_root_dev);
994 /* *************************** */
995 /* ** Module Initialization ** */
996 /* *************************** */
/*
 * Module entry point: allocate the per-port device table, then bring up the
 * XSEG layer and the sysfs bus interface.  Returns 0 on success, negative
 * errno on failure; some error-path lines are elided in this view.
 */
998 static int __init xsegbd_init(void)
/* Fix: the original wrote sizeof(struct xsegbd_devices *), referencing a
 * non-existent tag ('xsegbd_devices' vs 'xsegbd_device') -- it only worked
 * because all object pointers share a size.  kcalloc(n, sizeof(*ptr), ...)
 * ties the element size to the variable itself and adds overflow-checked
 * multiplication of max_dev * element size. */
1001 xsegbd_devices = kcalloc(max_dev, sizeof(*xsegbd_devices), GFP_KERNEL);
1002 if (!xsegbd_devices)
/* Redundant with DEFINE_SPINLOCK(xsegbd_devices_lock) above, but harmless. */
1005 spin_lock_init(&xsegbd_devices_lock);
1008 ret = xsegbd_xseg_init();
1012 ret = xsegbd_sysfs_init();
1016 XSEGLOG("initialization complete");
/* Error unwind (labels elided): free the device table. */
1025 kfree(xsegbd_devices);
/* Module exit: tear down the sysfs bus/root device; remaining teardown
 * (xseg quit, freeing the device table) is on elided lines. */
1030 static void __exit xsegbd_exit(void)
1032 xsegbd_sysfs_cleanup();
1036 module_init(xsegbd_init);
1037 module_exit(xsegbd_exit);