#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/device.h>
#include <linux/completion.h>
#define XSEGBD_MINORS 1

MODULE_DESCRIPTION("xsegbd");
MODULE_AUTHOR("XSEG");
MODULE_LICENSE("GPL");
static long sector_size = 0;
static long blksize = 512;
static int major = 0; /* 0 requests a dynamically allocated major */
static char name[XSEGBD_SEGMENT_NAMELEN] = "xsegbd";
static char spec[256] = "xsegdev:xsegbd:4:512:64:1024:12";

module_param(sector_size, long, 0644);
module_param(blksize, long, 0644);
module_param(major, int, 0644);
module_param_string(name, name, sizeof(name), 0644);
module_param_string(spec, spec, sizeof(spec), 0644);
static struct xsegbd xsegbd;
static DEFINE_MUTEX(xsegbd_mutex);
static LIST_HEAD(xsegbd_dev_list);

/* ********************* */
/* ** XSEG Operations ** */
/* ********************* */
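/*
 * Memory and segment hooks backing the "xsegdev" xseg type: plain
 * kmalloc/krealloc/kfree for allocation, with segment creation, mapping
 * and teardown performed through the xsegdev character device
 * (xsegdev_get(0)).
 */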
static void *xsegdev_malloc(uint64_t size)
{
        return kmalloc((size_t)size, GFP_KERNEL);
}

static void *xsegdev_realloc(void *mem, uint64_t size)
{
        return krealloc(mem, (size_t)size, GFP_KERNEL);
}

static void xsegdev_mfree(void *ptr)
{
        kfree(ptr);
}

static long xsegdev_allocate(const char *name, uint64_t size)
        struct xsegdev *xsegdev = xsegdev_get(0);

        r = IS_ERR(xsegdev) ? PTR_ERR(xsegdev) : 0;
                XSEGLOG("cannot acquire xsegdev");

        if (xsegdev->segment) {
                XSEGLOG("destroying existing xsegdev segment");
                r = xsegdev_destroy_segment(xsegdev);

        XSEGLOG("creating xsegdev segment size %llu", size);
        r = xsegdev_create_segment(xsegdev, size, 1);

        xsegdev->segsize = size;
static long xsegdev_deallocate(const char *name)
        struct xsegdev *xsegdev = xsegdev_get(0);
        int r = IS_ERR(xsegdev) ? PTR_ERR(xsegdev) : 0;

        clear_bit(XSEGDEV_RESERVED, &xsegdev->flags);
        XSEGLOG("destroying segment");
        r = xsegdev_destroy_segment(xsegdev);
                XSEGLOG(" ...failed");

        xsegdev_put(xsegdev);
static long xseg_callback(void *arg);

static void *xsegdev_map(const char *name, uint64_t size)
        struct xseg *xseg = NULL;
        struct xsegdev *dev = xsegdev_get(0);

        r = IS_ERR(dev) ? PTR_ERR(dev) : 0;

        if (size > dev->segsize)

        if (dev->callback) /* in use */

        dev->callback = xseg_callback;
        xseg = (void *)dev->segment;
static void xsegdev_unmap(void *ptr, uint64_t size)
        struct xsegdev *xsegdev = xsegdev_get(0);
        int r = IS_ERR(xsegdev) ? PTR_ERR(xsegdev) : 0;

        //xsegdev->callarg = NULL;
        xsegdev->callback = NULL;
        xsegdev_put(xsegdev);
static struct xseg_type xseg_xsegdev = {
        /* xseg operations */
        .malloc = xsegdev_malloc,
        .realloc = xsegdev_realloc,
        .mfree = xsegdev_mfree,
        .allocate = xsegdev_allocate,
        .deallocate = xsegdev_deallocate,
        .map = xsegdev_map,
        .unmap = xsegdev_unmap
static int posix_signal_init(void)

static void posix_signal_quit(void) { }

static int posix_prepare_wait(struct xseg_port *port)

static int posix_cancel_wait(struct xseg_port *port)

static int posix_wait_signal(struct xseg_port *port, uint32_t timeout)

static int posix_signal(struct xseg_port *port)
        struct task_struct *task;

        pid = find_vpid((pid_t)port->waitcue);
        task = pid_task(pid, PIDTYPE_PID);
        ret = send_sig(SIGIO, task, 1);

static void *posix_malloc(uint64_t size)

static void *posix_realloc(void *mem, uint64_t size)

static void posix_mfree(void *mem) { }
static struct xseg_peer xseg_peer_posix = {
        /* xseg signal operations */
        .signal_init = posix_signal_init,
        .signal_quit = posix_signal_quit,
        .cancel_wait = posix_cancel_wait,
        .prepare_wait = posix_prepare_wait,
        .wait_signal = posix_wait_signal,
        .signal = posix_signal,
        .malloc = posix_malloc,
        .realloc = posix_realloc,
        .mfree = posix_mfree
static int xsegdev_signal_init(void)

static void xsegdev_signal_quit(void) { }

static int xsegdev_prepare_wait(struct xseg_port *port)

static int xsegdev_cancel_wait(struct xseg_port *port)

static int xsegdev_wait_signal(struct xseg_port *port, uint32_t timeout)

static int xsegdev_signal(struct xseg_port *port)

static struct xseg_peer xseg_peer_xsegdev = {
        /* xseg signal operations */
        .signal_init = xsegdev_signal_init,
        .signal_quit = xsegdev_signal_quit,
        .cancel_wait = xsegdev_cancel_wait,
        .prepare_wait = xsegdev_prepare_wait,
        .wait_signal = xsegdev_wait_signal,
        .signal = xsegdev_signal,
        .malloc = xsegdev_malloc,
        .realloc = xsegdev_realloc,
        .mfree = xsegdev_mfree
/* ************************* */
/* ***** sysfs helpers ***** */
/* ************************* */

static struct xsegbd_device *dev_to_xsegbd(struct device *dev)
{
        return container_of(dev, struct xsegbd_device, dev);
}

static struct device *xsegbd_get_dev(struct xsegbd_device *xsegbd_dev)
{
        return get_device(&xsegbd_dev->dev);
}

static void xsegbd_put_dev(struct xsegbd_device *xsegbd_dev)
{
        put_device(&xsegbd_dev->dev);
}
/* ************************* */
/* ** XSEG Initialization ** */
/* ************************* */
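/*
 * Bring up the shared segment: register the xsegdev segment type and the
 * posix/xsegdev peer types, parse the spec module parameter, create the
 * segment through xsegdev if none exists yet, and finally join it.
 */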
int xsegbd_xseg_init(void)
        struct xsegdev *xsegdev;

        strncpy(xsegbd.name, name, XSEGBD_SEGMENT_NAMELEN);

        XSEGLOG("registering xseg types");
        xsegbd.namesize = strlen(xsegbd.name);

        r = xseg_register_type(&xseg_xsegdev);

        r = xseg_register_peer(&xseg_peer_posix);

        r = xseg_register_peer(&xseg_peer_xsegdev);

        r = xseg_initialize("xsegdev");
                XSEGLOG("cannot initialize 'xsegdev' peer");

        r = xseg_parse_spec(spec, &xsegbd.config);

        if (strncmp(xsegbd.config.type, "xsegdev", 16))
                XSEGLOG("WARNING: unexpected segment type '%s' vs 'xsegdev'",

        xsegdev = xsegdev_get(0);
        if (!xsegdev->segment) {
                XSEGLOG("creating segment");
                r = xseg_create(&xsegbd.config);
                        XSEGLOG("cannot create segment");

        xsegdev_put(xsegdev);

        XSEGLOG("joining segment");
        xsegbd.xseg = xseg_join("xsegdev", "xsegdev");
                XSEGLOG("cannot join segment");

        xseg_unregister_peer(xseg_peer_xsegdev.name);

        xseg_unregister_peer(xseg_peer_posix.name);

        xseg_unregister_type(xseg_xsegdev.name);
int xsegbd_xseg_quit(void)
        /* make sure to unmap the segment first */
        xsegbd.xseg->type.ops.unmap(xsegbd.xseg, xsegbd.xseg->segment_size);

        xseg_unregister_peer(xseg_peer_xsegdev.name);
        xseg_unregister_peer(xseg_peer_posix.name);
        xseg_unregister_type(xseg_xsegdev.name);
/* ***************************** */
/* ** Block Device Operations ** */
/* ***************************** */
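/*
 * open/release only pin and unpin the embedded struct device; all the real
 * work happens in the request function and the xseg callback below.
 */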
static int xsegbd_open(struct block_device *bdev, fmode_t mode)
{
        struct gendisk *disk = bdev->bd_disk;
        struct xsegbd_device *xsegbd_dev = disk->private_data;

        xsegbd_get_dev(xsegbd_dev);
        return 0;
}

static int xsegbd_release(struct gendisk *gd, fmode_t mode)
{
        struct xsegbd_device *xsegbd_dev = gd->private_data;

        xsegbd_put_dev(xsegbd_dev);
        return 0;
}

static int xsegbd_ioctl(struct block_device *bdev, fmode_t mode,
                        unsigned int cmd, unsigned long arg)

static const struct block_device_operations xsegbd_ops = {
        .owner = THIS_MODULE,
        .open = xsegbd_open,
        .release = xsegbd_release,
        .ioctl = xsegbd_ioctl
/* *************************** */
/* ** Device Initialization ** */
/* *************************** */
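/*
 * Per-device setup: allocate and tune the request queue, allocate the
 * gendisk, set up the pending-request bookkeeping (one slot per in-flight
 * xseg request), then size the disk and add_disk() it.
 */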
static void xseg_request_fn(struct request_queue *rq);
static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev);

static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
        struct gendisk *disk;
        unsigned int max_request_size_bytes;

        spin_lock_init(&xsegbd_dev->lock);

        xsegbd_dev->xsegbd = &xsegbd;

        xsegbd_dev->blk_queue = blk_alloc_queue(GFP_KERNEL);
        if (!xsegbd_dev->blk_queue)

        blk_init_allocated_queue(xsegbd_dev->blk_queue, xseg_request_fn, &xsegbd_dev->lock);
        xsegbd_dev->blk_queue->queuedata = xsegbd_dev;

        blk_queue_flush(xsegbd_dev->blk_queue, REQ_FLUSH | REQ_FUA);
        blk_queue_logical_block_size(xsegbd_dev->blk_queue, 512);
        blk_queue_physical_block_size(xsegbd_dev->blk_queue, blksize);
        blk_queue_bounce_limit(xsegbd_dev->blk_queue, BLK_BOUNCE_ANY);

        //blk_queue_max_segments(dev->blk_queue, 512);
        /* calculate maximum block request size:
         * request size in pages * page_size,
         * leaving one page in the buffer for the name
         */
        max_request_size_bytes =
                (unsigned int) (xsegbd.config.request_size - 1) *
                (1 << xsegbd.config.page_shift);
        blk_queue_max_hw_sectors(xsegbd_dev->blk_queue, max_request_size_bytes >> 9);
        blk_queue_max_segment_size(xsegbd_dev->blk_queue, max_request_size_bytes);
        blk_queue_io_min(xsegbd_dev->blk_queue, max_request_size_bytes);
        blk_queue_io_opt(xsegbd_dev->blk_queue, max_request_size_bytes);
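        /*
         * Example with illustrative numbers only: request_size = 64 pages and
         * page_shift = 12 give max_request_size_bytes = (64 - 1) * 4096 = 258048,
         * i.e. a 504-sector cap passed to blk_queue_max_hw_sectors() above.
         */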
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xsegbd_dev->blk_queue);

        /* vkoukis says we don't need partitions */
        xsegbd_dev->gd = disk = alloc_disk(1);

        disk->major = xsegbd_dev->major;
        disk->first_minor = 0; // id * XSEGBD_MINORS;
        disk->fops = &xsegbd_ops;
        disk->queue = xsegbd_dev->blk_queue;
        disk->private_data = xsegbd_dev;
        disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
        snprintf(disk->disk_name, 32, "xsegbd%u", xsegbd_dev->id);

        if (!xq_alloc_seq(&xsegbd_dev->blk_queue_pending, xsegbd_dev->nr_requests, xsegbd_dev->nr_requests))

        xsegbd_dev->blk_req_pending = kzalloc(sizeof(struct request *) * xsegbd_dev->nr_requests, GFP_KERNEL);
        if (!xsegbd_dev->blk_req_pending)

        /* allow a non-zero sector_size parameter to override the disk size */
        if (sector_size)
                xsegbd_dev->sectors = sector_size;
        else
                ret = xsegbd_get_size(xsegbd_dev);

        set_capacity(disk, xsegbd_dev->sectors);
        XSEGLOG("xsegbd active...");
        add_disk(disk); /* immediately activates the device */
static void xsegbd_dev_release(struct device *dev)
        struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
        struct xseg_port *port;

        /* cleanup gendisk and blk_queue the right way */
        if (xsegbd_dev->gd) {
                if (xsegbd_dev->gd->flags & GENHD_FL_UP)
                        del_gendisk(xsegbd_dev->gd);

                blk_cleanup_queue(xsegbd_dev->blk_queue);
                put_disk(xsegbd_dev->gd);

        /* reset the port's waitcue (aka cancel_wait) */
        port = &xsegbd.xseg->ports[xsegbd_dev->src_portno];
        port->waitcue = (long) NULL;

        xseg_free_requests(xsegbd.xseg, xsegbd_dev->src_portno, xsegbd_dev->nr_requests);

        kfree(xsegbd_dev->blk_req_pending);
        xq_free(&xsegbd_dev->blk_queue_pending);

        unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);

        module_put(THIS_MODULE);
/* ******************* */
/* ** Critical Path ** */
/* ******************* */
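/*
 * Copy helpers between the bios of a block request and the data buffer of
 * an xseg request inside the shared segment, one bio_vec at a time.
 */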
static void blk_to_xseg(struct xseg *xseg, struct xseg_request *xreq,
                        struct request *blkreq)
{
        struct bio_vec *bvec;
        struct req_iterator iter;
        uint64_t off = 0;
        char *data = XSEG_TAKE_PTR(xreq->data, xseg->segment);

        rq_for_each_segment(bvec, blkreq, iter) {
                char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
                memcpy(data + off, bdata, bvec->bv_len);
                off += bvec->bv_len;
                kunmap_atomic(bdata);
        }
}
static void xseg_to_blk(struct xseg *xseg, struct xseg_request *xreq,
                        struct request *blkreq)
{
        struct bio_vec *bvec;
        struct req_iterator iter;
        uint64_t off = 0;
        char *data = XSEG_TAKE_PTR(xreq->data, xseg->segment);

        rq_for_each_segment(bvec, blkreq, iter) {
                char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
                memcpy(bdata, data + off, bvec->bv_len);
                off += bvec->bv_len;
                kunmap_atomic(bdata);
        }
}
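/*
 * Request function: for each block request fetched from the queue, grab a
 * free xseg request, fill in the target name, offset and size, remember the
 * blkreq in blk_req_pending, then submit and signal the destination port.
 */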
static void xseg_request_fn(struct request_queue *rq)
        struct xseg_request *xreq;
        struct xsegbd_device *xsegbd_dev = rq->queuedata;
        struct xseg_port *port;
        struct request *blkreq;

        xreq = xseg_get_request(xsegbd.xseg, xsegbd_dev->src_portno);

        blkreq = blk_fetch_request(rq);

        if (blkreq->cmd_type != REQ_TYPE_FS) {
                XSEGLOG("non-fs cmd_type: %u. *shrug*", blkreq->cmd_type);
                __blk_end_request_all(blkreq, 0);

        datasize = blk_rq_bytes(blkreq);
        BUG_ON(xreq->buffersize - xsegbd_dev->namesize < datasize);
        BUG_ON(xseg_prep_request(xreq, xsegbd_dev->namesize, datasize));

        name = XSEG_TAKE_PTR(xreq->name, xsegbd.xseg->segment);
        strncpy(name, xsegbd_dev->name, xsegbd_dev->namesize);
        blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending);
        BUG_ON(blkreq_idx == None);
        /* WARN_ON(xsegbd_dev->blk_req_pending[blkreq_idx] */
        xsegbd_dev->blk_req_pending[blkreq_idx] = blkreq;
        xreq->priv = (uint64_t)blkreq_idx;
        xreq->size = datasize;
        xreq->offset = blk_rq_pos(blkreq) << 9;

        if (xreq->offset >= (sector_size << 9))
                XSEGLOG("sector offset: %lu > %lu, flush:%u, fua:%u",
                        blk_rq_pos(blkreq), sector_size,
                        blkreq->cmd_flags & REQ_FLUSH,
                        blkreq->cmd_flags & REQ_FUA);

        if (blkreq->cmd_flags & REQ_FLUSH)
                xreq->flags |= XF_FLUSH;

        if (blkreq->cmd_flags & REQ_FUA)
                xreq->flags |= XF_FUA;

        if (rq_data_dir(blkreq)) {
                /* unlock for data transfers? */
                blk_to_xseg(xsegbd.xseg, xreq, blkreq);

        /* Temp/ugly hack, add support for it in prepare_wait instead */
        port = &xsegbd.xseg->ports[xsegbd_dev->src_portno];
        port->waitcue = (long) xsegbd_dev;

        BUG_ON(xseg_submit(xsegbd.xseg, xsegbd_dev->dst_portno, xreq) == NoSerial);

        /*
         * This is going to happen at least once.
         * Add a WARN_ON when debugging to find out why it happens more than once.
         */
        xseg_signal(xsegbd_dev->xsegbd->xseg, xsegbd_dev->dst_portno);

        xseg_put_request(xsegbd_dev->xsegbd->xseg, xsegbd_dev->src_portno, xreq);
int update_dev_sectors_from_request(struct xsegbd_device *xsegbd_dev,
                                    struct xseg_request *xreq)
        if (xreq->state & XS_ERROR)

        if (!(xreq->state & XS_SERVED))

        data = XSEG_TAKE_PTR(xreq->data, xsegbd.xseg->segment);
        xsegbd_dev->sectors = *((uint64_t *) data) / 512ULL;
static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev)
        struct xseg_request *xreq;
        struct xseg_port *port;
        struct completion comp;

        xreq = xseg_get_request(xsegbd.xseg, xsegbd_dev->src_portno);

        datasize = sizeof(uint64_t);
        BUG_ON((uint64_t)&comp < xsegbd_dev->nr_requests);
        BUG_ON(xreq->buffersize - xsegbd_dev->namesize < datasize);
        BUG_ON(xseg_prep_request(xreq, xsegbd_dev->namesize, datasize));

        init_completion(&comp);
        xreq->priv = (uint64_t)(long)&comp;

        name = XSEG_TAKE_PTR(xreq->name, xsegbd.xseg->segment);
        strncpy(name, xsegbd_dev->name, xsegbd_dev->namesize);
        xreq->size = datasize;

        port = &xsegbd.xseg->ports[xsegbd_dev->src_portno];
        port->waitcue = (uint64_t)(long)xsegbd_dev;

        BUG_ON(xseg_submit(xsegbd.xseg, xsegbd_dev->dst_portno, xreq) == NoSerial);
        xseg_signal(xsegbd.xseg, xsegbd_dev->dst_portno);

        wait_for_completion_interruptible(&comp);
        ret = update_dev_sectors_from_request(xsegbd_dev, xreq);

        xseg_put_request(xsegbd.xseg, xsegbd_dev->src_portno, xreq);
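/*
 * Completion path, entered through the xsegdev callback: receive finished
 * xseg requests, wake up a blocked size query (priv then holds a completion
 * pointer) or end the matching block request, and kick the request function.
 */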
static long xseg_callback(void *arg)
        struct xsegbd_device *xsegbd_dev = NULL;
        struct xseg_request *xreq;
        struct xseg_port *port;
        struct request *blkreq;

        port = XSEG_TAKE_PTR(arg, xsegbd.xseg->segment);
        xsegbd_dev = (struct xsegbd_device *) port->waitcue;

        xreq = xseg_receive(xsegbd.xseg, xsegbd_dev->src_portno);

        /* we rely upon our peers not to have touched ->priv */
        blkreq_idx = (uint64_t)xreq->priv;
        if (blkreq_idx >= xsegbd_dev->nr_requests) {
                /* someone is blocking on this request
                   and will handle it when we wake them up. */
                complete((void *)(long)xreq->priv);
                /* the request is the blocker's responsibility so
                   we will not put_request(); */

        /* this is now treated as a block I/O request to end */
        blkreq = xsegbd_dev->blk_req_pending[blkreq_idx];
        /* WARN_ON(!blkreq); */

        if (!(xreq->state & XS_SERVED))

        if (xreq->serviced != blk_rq_bytes(blkreq))

        /* unlock for data transfer? */
        if (!rq_data_dir(blkreq))
                xseg_to_blk(xsegbd.xseg, xreq, blkreq);

        blk_end_request_all(blkreq, err);
        xq_append_head(&xsegbd_dev->blk_queue_pending, blkreq_idx);
        xseg_put_request(xsegbd.xseg, xreq->portno, xreq);

        spin_lock_irqsave(&xsegbd_dev->lock, flags);
        xseg_request_fn(xsegbd_dev->blk_queue);
        spin_unlock_irqrestore(&xsegbd_dev->lock, flags);
/* sysfs interface */

static struct bus_type xsegbd_bus_type = {
static ssize_t xsegbd_size_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);

        return sprintf(buf, "%llu\n", (unsigned long long) xsegbd_dev->sectors * 512ULL);
}

static ssize_t xsegbd_major_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);

        return sprintf(buf, "%d\n", xsegbd_dev->major);
}

static ssize_t xsegbd_srcport_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);

        return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->src_portno);
}

static ssize_t xsegbd_dstport_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);

        return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->dst_portno);
}

static ssize_t xsegbd_id_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);

        return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->id);
}

static ssize_t xsegbd_reqs_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);

        return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->nr_requests);
}

static ssize_t xsegbd_name_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);

        return sprintf(buf, "%s\n", xsegbd_dev->name);
}

static DEVICE_ATTR(size, S_IRUGO, xsegbd_size_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, xsegbd_major_show, NULL);
static DEVICE_ATTR(srcport, S_IRUGO, xsegbd_srcport_show, NULL);
static DEVICE_ATTR(dstport, S_IRUGO, xsegbd_dstport_show, NULL);
static DEVICE_ATTR(id, S_IRUGO, xsegbd_id_show, NULL);
static DEVICE_ATTR(reqs, S_IRUGO, xsegbd_reqs_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, xsegbd_name_show, NULL);
static struct attribute *xsegbd_attrs[] = {
        &dev_attr_major.attr,
        &dev_attr_srcport.attr,
        &dev_attr_dstport.attr,

static struct attribute_group xsegbd_attr_group = {
        .attrs = xsegbd_attrs,

static const struct attribute_group *xsegbd_attr_groups[] = {

static void xsegbd_sysfs_dev_release(struct device *dev)

static struct device_type xsegbd_device_type = {
        .groups = xsegbd_attr_groups,
        .release = xsegbd_sysfs_dev_release,

static void xsegbd_root_dev_release(struct device *dev)

static struct device xsegbd_root_dev = {
        .init_name = "xsegbd",
        .release = xsegbd_root_dev_release,
static int xsegbd_bus_add_dev(struct xsegbd_device *xsegbd_dev)
        mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
        dev = &xsegbd_dev->dev;

        dev->bus = &xsegbd_bus_type;
        dev->type = &xsegbd_device_type;
        dev->parent = &xsegbd_root_dev;
        dev->release = xsegbd_dev_release;
        dev_set_name(dev, "%d", xsegbd_dev->id);

        ret = device_register(dev);

        mutex_unlock(&xsegbd_mutex);

static void xsegbd_bus_del_dev(struct xsegbd_device *xsegbd_dev)
{
        device_unregister(&xsegbd_dev->dev);
}
static ssize_t xsegbd_add(struct bus_type *bus, const char *buf, size_t count)
        struct xsegbd_device *xsegbd_dev;
        struct xseg_port *xport;
        ssize_t ret = -ENOMEM;
        struct list_head *tmp;

        if (!try_module_get(THIS_MODULE))

        xsegbd_dev = kzalloc(sizeof(*xsegbd_dev), GFP_KERNEL);

        spin_lock_init(&xsegbd_dev->lock);
        INIT_LIST_HEAD(&xsegbd_dev->node);

        if (sscanf(buf, "%" __stringify(XSEGBD_TARGET_NAMELEN) "s "
                        "%d:%d:%d", xsegbd_dev->name, &xsegbd_dev->src_portno,
                        &xsegbd_dev->dst_portno, &xsegbd_dev->nr_requests) < 3) {

        xsegbd_dev->namesize = strlen(xsegbd_dev->name);

        mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);

        list_for_each(tmp, &xsegbd_dev_list) {
                struct xsegbd_device *entry;

                entry = list_entry(tmp, struct xsegbd_device, node);

                if (entry->src_portno == xsegbd_dev->src_portno) {

                if (entry->id >= new_id)
                        new_id = entry->id + 1;

        xsegbd_dev->id = new_id;

        list_add_tail(&xsegbd_dev->node, &xsegbd_dev_list);

        mutex_unlock(&xsegbd_mutex);

        XSEGLOG("registering block device major %d", major);
        ret = register_blkdev(major, XSEGBD_NAME);
                XSEGLOG("cannot register block device!");

        xsegbd_dev->major = ret;
        XSEGLOG("registered block device major %d", xsegbd_dev->major);

        ret = xsegbd_bus_add_dev(xsegbd_dev);

        XSEGLOG("binding to source port %u (destination %u)",
                xsegbd_dev->src_portno, xsegbd_dev->dst_portno);
        xport = xseg_bind_port(xsegbd.xseg, xsegbd_dev->src_portno);
                XSEGLOG("cannot bind to port");

        /* make sure we don't get any requests until we're ready to handle them */
        xport->waitcue = (long) NULL;

        XSEGLOG("allocating %u requests", xsegbd_dev->nr_requests);
        if (xseg_alloc_requests(xsegbd.xseg, xsegbd_dev->src_portno, xsegbd_dev->nr_requests)) {
                XSEGLOG("cannot allocate requests");

        ret = xsegbd_dev_init(xsegbd_dev);

        mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);

        list_del_init(&xsegbd_dev->node);
        xsegbd_bus_del_dev(xsegbd_dev);

        mutex_unlock(&xsegbd_mutex);

        unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);

        mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
        list_del_init(&xsegbd_dev->node);
        mutex_unlock(&xsegbd_mutex);
static struct xsegbd_device *__xsegbd_get_dev(unsigned long id)
{
        struct list_head *tmp;
        struct xsegbd_device *xsegbd_dev;

        list_for_each(tmp, &xsegbd_dev_list) {
                xsegbd_dev = list_entry(tmp, struct xsegbd_device, node);
                if (xsegbd_dev->id == id)
                        return xsegbd_dev;
        }
        return NULL;
}
static ssize_t xsegbd_remove(struct bus_type *bus, const char *buf, size_t count)
        struct xsegbd_device *xsegbd_dev = NULL;
        unsigned long ul_id;

        ret = kstrtoul(buf, 10, &ul_id);

        mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);

        xsegbd_dev = __xsegbd_get_dev(id);

        list_del_init(&xsegbd_dev->node);

        xsegbd_bus_del_dev(xsegbd_dev);

        mutex_unlock(&xsegbd_mutex);
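/*
 * Example usage of the bus attributes defined below (the sysfs path and the
 * target name/port/request numbers are illustrative assumptions):
 *
 *   echo "mytarget 1:2:128" > /sys/bus/xsegbd/add     # name src:dst:nr_requests
 *   echo 0 > /sys/bus/xsegbd/remove                   # remove the device with id 0
 */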
static struct bus_attribute xsegbd_bus_attrs[] = {
        __ATTR(add, S_IWUSR, NULL, xsegbd_add),
        __ATTR(remove, S_IWUSR, NULL, xsegbd_remove),
static int xsegbd_sysfs_init(void)
        xsegbd_bus_type.bus_attrs = xsegbd_bus_attrs;

        ret = bus_register(&xsegbd_bus_type);

        ret = device_register(&xsegbd_root_dev);

static void xsegbd_sysfs_cleanup(void)
{
        device_unregister(&xsegbd_root_dev);
        bus_unregister(&xsegbd_bus_type);
}
/* *************************** */
/* ** Module Initialization ** */
/* *************************** */
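/*
 * Module init brings up the xseg segment first and the sysfs bus interface
 * second; individual devices are then created and destroyed through the
 * add/remove bus attributes above.
 */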
static int __init xsegbd_init(void)
        ret = xsegbd_xseg_init();

        ret = xsegbd_sysfs_init();
                goto out_xseg_destroy;

        XSEGLOG("initialization complete");

static void __exit xsegbd_exit(void)
        xsegbd_sysfs_cleanup();

module_init(xsegbd_init);
module_exit(xsegbd_exit);