5 #include <linux/module.h>
6 #include <linux/moduleparam.h>
7 #include <linux/init.h>
8 #include <linux/sched.h>
9 #include <linux/kernel.h>
10 #include <linux/slab.h>
12 #include <linux/errno.h>
13 #include <linux/timer.h>
14 #include <linux/types.h>
15 #include <linux/vmalloc.h>
16 #include <linux/genhd.h>
17 #include <linux/blkdev.h>
18 #include <linux/bio.h>
19 #include <linux/device.h>
20 #include <linux/completion.h>
22 #include <sys/kernel/segdev.h>
/* One minor per disk: partitions are deliberately not supported (see alloc_disk(1) below). */
25 #define XSEGBD_MINORS 1
26 /* define max request size to be used in xsegbd */
27 //FIXME should we make this 4MB instead of 256KB ?
28 #define XSEGBD_MAX_REQUEST_SIZE 262144U
30 MODULE_DESCRIPTION("xsegbd");
31 MODULE_AUTHOR("XSEG");
32 MODULE_LICENSE("GPL");
/* Module parameters: a non-zero sector_size overrides the size reported by the peer. */
34 static long sector_size = 0;
35 static long blksize = 512;
/* max_dev bounds the xsegbd_devices[] lookup table allocated in xsegbd_init(). */
37 static int max_dev = 1024;
38 static char name[XSEGBD_SEGMENT_NAMELEN] = "xsegbd";
39 static char spec[256] = "segdev:xsegbd:4:1024:12";
41 module_param(sector_size, long, 0644);
42 module_param(blksize, long, 0644);
43 module_param(max_dev, int, 0644);
/* NOTE(review): the declaration of 'major' is not visible in this chunk -- presumably a static int above. */
44 module_param(major, int, 0644);
45 module_param_string(name, name, sizeof(name), 0644);
46 module_param_string(spec, spec, sizeof(spec), 0644);
/* Single global xseg handle plus a per-source-port device table. */
48 static struct xsegbd xsegbd;
49 static struct xsegbd_device **xsegbd_devices; /* indexed by portno */
/* xsegbd_mutex serializes sysfs add/remove/refresh; the spinlock guards the device table. */
50 static DEFINE_MUTEX(xsegbd_mutex);
51 static DEFINE_SPINLOCK(xsegbd_devices_lock);
/*
 * __xsegbd_get_dev - look up the device bound to source port @id.
 * Reads xsegbd_devices[] under xsegbd_devices_lock; returns NULL if
 * nothing is registered for @id.
 * NOTE(review): no reference is taken on the device here -- the pointer
 * can go stale after the unlock; confirm callers hold xsegbd_mutex or
 * otherwise pin the device.
 */
55 static struct xsegbd_device *__xsegbd_get_dev(unsigned long id)
57 	struct xsegbd_device *xsegbd_dev = NULL;
59 	spin_lock(&xsegbd_devices_lock);
60 	xsegbd_dev = xsegbd_devices[id];
61 	spin_unlock(&xsegbd_devices_lock);
66 /* ************************* */
67 /* ***** sysfs helpers ***** */
68 /* ************************* */
/* Map an embedded struct device back to its owning xsegbd_device. */
70 static struct xsegbd_device *dev_to_xsegbd(struct device *dev)
72 	return container_of(dev, struct xsegbd_device, dev);
/* Take a reference on the device-model object backing @xsegbd_dev. */
75 static struct device *xsegbd_get_dev(struct xsegbd_device *xsegbd_dev)
78 	return get_device(&xsegbd_dev->dev);
/* Drop the reference taken by xsegbd_get_dev(); may trigger xsegbd_dev_release(). */
81 static void xsegbd_put_dev(struct xsegbd_device *xsegbd_dev)
83 	put_device(&xsegbd_dev->dev);
86 /* ************************* */
87 /* ** XSEG Initialization ** */
88 /* ************************* */
90 static void xseg_callback(uint32_t portno);
/*
 * xsegbd_xseg_init - initialize the xseg library and join the shared segment
 * described by the 'spec' module parameter.
 * Returns 0 on success, negative on failure (error paths not visible in this chunk).
 */
92 int xsegbd_xseg_init(void)
/* NOTE(review): strncpy does not guarantee NUL-termination if 'name' fills
 * XSEGBD_SEGMENT_NAMELEN exactly -- verify xsegbd.name has a spare byte or
 * is always treated as length-bounded. */
97 	strncpy(xsegbd.name, name, XSEGBD_SEGMENT_NAMELEN);
99 	r = xseg_initialize();
101 		XSEGLOG("cannot initialize 'segdev' peer");
/* Parse "type:name:ports:requests:..." out of the spec string into xsegbd.config. */
105 	r = xseg_parse_spec(spec, &xsegbd.config);
/* Only warn on a non-segdev segment type; initialization continues regardless. */
109 	if (strncmp(xsegbd.config.type, "segdev", 16))
110 		XSEGLOG("WARNING: unexpected segment type '%s' vs 'segdev'",
113 	/* leave it here for now */
114 	XSEGLOG("joining segment");
115 	xsegbd.xseg = xseg_join(	xsegbd.config.type,
120 		XSEGLOG("cannot find segment");
/*
 * xsegbd_xseg_quit - detach from the shared segment on module teardown.
 * Clears the segdev reservation and unmaps the segment before any further
 * cleanup (the remaining teardown lines are not visible in this chunk).
 * NOTE(review): segdev_get(0) return value is used unchecked -- confirm it
 * cannot fail at this point.
 */
131 int xsegbd_xseg_quit(void)
133 	struct segdev *segdev;
135 	/* make sure to unmap the segment first */
136 	segdev = segdev_get(0);
137 	clear_bit(SEGDEV_RESERVED, &segdev->flags);
138 	xsegbd.xseg->priv->segment_type.ops.unmap(xsegbd.xseg, xsegbd.xseg->segment_size);
145 /* ***************************** */
146 /* ** Block Device Operations ** */
147 /* ***************************** */
/*
 * xsegbd_open - block_device_operations.open hook.
 * Pins the backing device object for the lifetime of the open; balanced by
 * xsegbd_put_dev() in xsegbd_release().
 */
149 static int xsegbd_open(struct block_device *bdev, fmode_t mode)
151 	struct gendisk *disk = bdev->bd_disk;
152 	struct xsegbd_device *xsegbd_dev = disk->private_data;
154 	xsegbd_get_dev(xsegbd_dev);
/*
 * xsegbd_release - block_device_operations.release hook.
 * Drops the reference taken in xsegbd_open().
 */
159 static int xsegbd_release(struct gendisk *gd, fmode_t mode)
161 	struct xsegbd_device *xsegbd_dev = gd->private_data;
163 	xsegbd_put_dev(xsegbd_dev);
/* ioctl hook -- body not visible in this chunk; presumably a stub returning -ENOTTY or similar. */
168 static int xsegbd_ioctl(struct block_device *bdev, fmode_t mode,
169 			unsigned int cmd, unsigned long arg)
/* Block device operations table; the .open initializer (line 176) is not visible in this chunk. */
174 static const struct block_device_operations xsegbd_ops = {
175 	.owner		= THIS_MODULE,
177 	.release	= xsegbd_release,
178 	.ioctl		= xsegbd_ioctl
182 /* *************************** */
183 /* ** Device Initialization ** */
184 /* *************************** */
186 static void xseg_request_fn(struct request_queue *rq);
187 static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev);
/*
 * xsegbd_dev_init - set up the request queue and gendisk for one device.
 * Allocates and configures the blk queue, wires xseg_request_fn() as the
 * request function (driven under xsegbd_dev->rqlock), creates the gendisk,
 * determines the capacity (module parameter override or query of the peer
 * via xsegbd_get_size()), and activates the disk with add_disk().
 * Returns 0 on success; error paths unwind queue/disk (some lines not
 * visible in this chunk).
 */
189 static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
192 	struct gendisk *disk;
193 	unsigned int max_request_size_bytes;
195 	spin_lock_init(&xsegbd_dev->rqlock);
197 	xsegbd_dev->xsegbd = &xsegbd;
199 	xsegbd_dev->blk_queue = blk_alloc_queue(GFP_KERNEL);
200 	if (!xsegbd_dev->blk_queue)
/* Attach our request_fn to the pre-allocated queue, serialized by rqlock. */
203 	if (!blk_init_allocated_queue(xsegbd_dev->blk_queue,
204 			xseg_request_fn, &xsegbd_dev->rqlock))
207 	xsegbd_dev->blk_queue->queuedata = xsegbd_dev;
/* Advertise FLUSH/FUA support; these flags are forwarded to the peer as XF_FLUSH/XF_FUA. */
209 	blk_queue_flush(xsegbd_dev->blk_queue, REQ_FLUSH | REQ_FUA);
210 	blk_queue_logical_block_size(xsegbd_dev->blk_queue, 512);
211 	blk_queue_physical_block_size(xsegbd_dev->blk_queue, blksize);
212 	blk_queue_bounce_limit(xsegbd_dev->blk_queue, BLK_BOUNCE_ANY);
214 	//blk_queue_max_segments(dev->blk_queue, 512);
/* Cap a single request at XSEGBD_MAX_REQUEST_SIZE so it fits one xseg request buffer. */
216 	max_request_size_bytes = XSEGBD_MAX_REQUEST_SIZE;
217 	blk_queue_max_hw_sectors(xsegbd_dev->blk_queue, max_request_size_bytes >> 9);
218 	blk_queue_max_segment_size(xsegbd_dev->blk_queue, max_request_size_bytes);
219 	blk_queue_io_min(xsegbd_dev->blk_queue, max_request_size_bytes);
220 	blk_queue_io_opt(xsegbd_dev->blk_queue, max_request_size_bytes);
/* Mark non-rotational: backing store is a network/segment peer, not a spinning disk. */
222 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xsegbd_dev->blk_queue);
224 	/* vkoukis says we don't need partitions */
225 	xsegbd_dev->gd = disk = alloc_disk(1);
229 	disk->major = xsegbd_dev->major;
230 	disk->first_minor = 0; // id * XSEGBD_MINORS;
231 	disk->fops = &xsegbd_ops;
232 	disk->queue = xsegbd_dev->blk_queue;
233 	disk->private_data = xsegbd_dev;
234 	disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
235 	snprintf(disk->disk_name, 32, "xsegbd%u", xsegbd_dev->id);
239 	/* allow a non-zero sector_size parameter to override the disk size */
241 		xsegbd_dev->sectors = sector_size;
/* Otherwise ask the peer for the target's size (blocking round-trip). */
243 		ret = xsegbd_get_size(xsegbd_dev);
248 	set_capacity(disk, xsegbd_dev->sectors);
249 	XSEGLOG("xsegbd active...");
250 	add_disk(disk); /* immediately activates the device */
/* Error unwind: release disk, then queue (intervening labels not visible in this chunk). */
256 	put_disk(xsegbd_dev->gd);
258 	blk_cleanup_queue(xsegbd_dev->blk_queue);
260 	xsegbd_dev->gd = NULL;
/*
 * xsegbd_dev_release - device-model release callback (last put_device()).
 * Tears down the device: cancels the port wait, removes gendisk/queue,
 * unregisters the block major, unhooks the device from the port table,
 * and frees the pending-request bookkeeping. Finally drops the module
 * reference taken in xsegbd_add().
 */
264 static void xsegbd_dev_release(struct device *dev)
266 	struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
/* Stop expecting callbacks for this source port before dismantling state. */
268 	xseg_cancel_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
270 	/* cleanup gendisk and blk_queue the right way */
271 	if (xsegbd_dev->gd) {
272 		if (xsegbd_dev->gd->flags & GENHD_FL_UP)
273 			del_gendisk(xsegbd_dev->gd);
275 		blk_cleanup_queue(xsegbd_dev->blk_queue);
276 		put_disk(xsegbd_dev->gd);
279 //	if (xseg_free_requests(xsegbd_dev->xseg,
280 //			xsegbd_dev->src_portno, xsegbd_dev->nr_requests) < 0)
281 //		XSEGLOG("Error trying to free requests!\n");
284 	unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
/* Remove from the per-port lookup table; BUG if the slot was reassigned. */
286 	spin_lock(&xsegbd_devices_lock);
287 	BUG_ON(xsegbd_devices[xsegbd_dev->src_portno] != xsegbd_dev);
288 	xsegbd_devices[xsegbd_dev->src_portno] = NULL;
289 	spin_unlock(&xsegbd_devices_lock);
/* NOTE(review): the NULL guard is redundant -- kfree(NULL) is a no-op. */
291 	if (xsegbd_dev->blk_req_pending)
292 		kfree(xsegbd_dev->blk_req_pending);
293 	xq_free(&xsegbd_dev->blk_queue_pending);
297 	module_put(THIS_MODULE);
300 /* ******************* */
301 /* ** Critical Path ** */
302 /* ******************* */
/*
 * blk_to_xseg - copy the payload of a WRITE block request into the xseg
 * request's data buffer, one bio_vec segment at a time.
 * Runs with pages mapped via kmap_atomic, so no sleeping between map/unmap.
 * NOTE(review): the 'off' accumulator's declaration/increment lines are not
 * visible in this chunk -- presumably advanced by bv_len per segment.
 */
304 static void blk_to_xseg(struct xseg *xseg, struct xseg_request *xreq,
305 			struct request *blkreq)
307 	struct bio_vec *bvec;
308 	struct req_iterator iter;
310 	char *data = xseg_get_data(xseg, xreq);
311 	rq_for_each_segment(bvec, blkreq, iter) {
312 		char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
313 		memcpy(data + off, bdata, bvec->bv_len);
315 		kunmap_atomic(bdata);
/*
 * xseg_to_blk - mirror of blk_to_xseg: copy the xseg reply buffer back into
 * the pages of a READ block request, one bio_vec segment at a time.
 * NOTE(review): as in blk_to_xseg, the 'off' bookkeeping lines are not
 * visible in this chunk.
 */
319 static void xseg_to_blk(struct xseg *xseg, struct xseg_request *xreq,
320 			struct request *blkreq)
322 	struct bio_vec *bvec;
323 	struct req_iterator iter;
325 	char *data = xseg_get_data(xseg, xreq);
326 	rq_for_each_segment(bvec, blkreq, iter) {
327 		char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
328 		memcpy(bdata, data + off, bvec->bv_len);
330 		kunmap_atomic(bdata);
/*
 * xseg_request_fn - block-layer request function (called with rqlock held).
 * Drains the request queue: for each block request it allocates an xseg
 * request and a pending-slot index, preps the target/data buffers, copies
 * write payloads in, and submits to the destination port, signalling the
 * peer. Completion arrives asynchronously in xseg_callback().
 * Error paths put the xseg request back and return the pending index to the
 * free queue (several intervening lines not visible in this chunk).
 */
334 static void xseg_request_fn(struct request_queue *rq)
336 	struct xseg_request *xreq;
337 	struct xsegbd_device *xsegbd_dev = rq->queuedata;
338 	struct request *blkreq;
339 	struct xsegbd_pending *pending;
347 		blkreq_idx = Noneidx;
348 		xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno,
349 				xsegbd_dev->dst_portno, X_ALLOC);
/* Reserve a slot in blk_req_pending to correlate the async completion. */
353 		blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending,
354 				xsegbd_dev->src_portno);
355 		if (blkreq_idx == Noneidx)
358 		if (blkreq_idx >= xsegbd_dev->nr_requests) {
359 			XSEGLOG("blkreq_idx >= xsegbd_dev->nr_requests");
364 		blkreq = blk_fetch_request(rq);
/* Only filesystem requests are forwarded; anything else completes immediately. */
368 		if (blkreq->cmd_type != REQ_TYPE_FS) {
370 			XSEGLOG("non-fs cmd_type: %u. *shrug*", blkreq->cmd_type);
371 			__blk_end_request_all(blkreq, 0);
375 		datalen = blk_rq_bytes(blkreq);
376 		r = xseg_prep_request(xsegbd_dev->xseg, xreq,
377 					xsegbd_dev->targetlen, datalen);
379 			XSEGLOG("couldn't prep request");
380 			__blk_end_request_err(blkreq, r);
/* Sanity check: the prepped buffer must fit target name plus payload. */
385 		if (xreq->bufferlen - xsegbd_dev->targetlen < datalen){
386 			XSEGLOG("malformed req buffers");
387 			__blk_end_request_err(blkreq, r);
/* NOTE(review): strncpy with exactly targetlen -- target is length-bounded, not NUL-terminated. */
392 		target = xseg_get_target(xsegbd_dev->xseg, xreq);
393 		strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
395 		pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
396 		pending->dev = xsegbd_dev;
397 		pending->request = blkreq;
/* comp == NULL marks an async block I/O (vs the synchronous get_size path). */
398 		pending->comp = NULL;
400 		xreq->size = datalen;
401 		xreq->offset = blk_rq_pos(blkreq) << 9;
/* The pending index rides along in xreq->priv so the callback can find us. */
402 		xreq->priv = (uint64_t) blkreq_idx;
405 		if (xreq->offset >= (sector_size << 9))
406 			XSEGLOG("sector offset: %lu > %lu, flush:%u, fua:%u",
407 				blk_rq_pos(blkreq), sector_size,
408 				blkreq->cmd_flags & REQ_FLUSH,
409 				blkreq->cmd_flags & REQ_FUA);
412 		if (blkreq->cmd_flags & REQ_FLUSH)
413 			xreq->flags |= XF_FLUSH;
415 		if (blkreq->cmd_flags & REQ_FUA)
416 			xreq->flags |= XF_FUA;
/* rq_data_dir() != 0 means WRITE: stage the payload into the segment. */
418 		if (rq_data_dir(blkreq)) {
419 			/* unlock for data transfers? */
420 			blk_to_xseg(xsegbd_dev->xseg, xreq, blkreq);
428 		p = xseg_submit(xsegbd_dev->xseg, xreq,
429 					xsegbd_dev->src_portno, X_ALLOC);
/* NOTE(review): typo in log string -- "coundn't" should read "couldn't". */
431 			XSEGLOG("coundn't submit req");
433 			__blk_end_request_err(blkreq, r);
436 		WARN_ON(xseg_signal(xsegbd_dev->xsegbd->xseg, p) < 0);
/* Common error unwind: return the xseg request and the pending slot. */
439 	BUG_ON(xseg_put_request(xsegbd_dev->xsegbd->xseg, xreq,
440 			xsegbd_dev->src_portno) == -1);
441 	if (blkreq_idx != Noneidx)
442 		BUG_ON(xq_append_head(&xsegbd_dev->blk_queue_pending,
443 				blkreq_idx, xsegbd_dev->src_portno) == Noneidx);
/*
 * update_dev_sectors_from_request - parse a completed X_INFO-style reply.
 * Validates the request state (must be SERVED, not FAILED), then interprets
 * the first 8 bytes of the reply data as the target size in bytes and
 * stores it as a 512-byte sector count. Return values on the error paths
 * are not visible in this chunk.
 */
446 int update_dev_sectors_from_request(	struct xsegbd_device *xsegbd_dev,
447 					struct xseg_request *xreq	)
451 		XSEGLOG("Invalid xreq");
455 	if (xreq->state & XS_FAILED)
458 	if (!(xreq->state & XS_SERVED))
461 	data = xseg_get_data(xsegbd_dev->xseg, xreq);
463 		XSEGLOG("Invalid req data");
/* Peer reports size in bytes; block layer wants 512-byte sectors. */
466 	xsegbd_dev->sectors = *((uint64_t *) data) / 512ULL;
/*
 * xsegbd_get_size - synchronously query the peer for the target's size.
 * Builds an xseg request whose reply carries a uint64_t byte count, submits
 * it, and blocks on a completion that xseg_callback() fires (pending->comp
 * non-NULL marks this request as synchronous). On wake-up the size is
 * folded into xsegbd_dev->sectors via update_dev_sectors_from_request().
 * NOTE(review): wait_for_completion_interruptible()'s return value is not
 * checked on the visible lines -- an interrupted wait would read xreq while
 * the peer may still own it; confirm against the full source.
 */
470 static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev)
472 	struct xseg_request *xreq;
476 	struct xsegbd_pending *pending;
477 	struct completion comp;
481 	xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno,
482 			xsegbd_dev->dst_portno, X_ALLOC);
486 	datalen = sizeof(uint64_t);
487 	BUG_ON(xseg_prep_request(xsegbd_dev->xseg, xreq, xsegbd_dev->targetlen, datalen));
488 	BUG_ON(xreq->bufferlen - xsegbd_dev->targetlen < datalen);
490 	init_completion(&comp);
491 	blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending, 1);
492 	if (blkreq_idx == Noneidx)
/* Mark the pending slot as a synchronous waiter (comp set, request NULL). */
495 	pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
496 	pending->dev = xsegbd_dev;
497 	pending->request = NULL;
498 	pending->comp = &comp;
501 	xreq->priv = (uint64_t) blkreq_idx;
/* NOTE(review): strncpy bounded by targetlen -- target is not NUL-terminated by design. */
503 	target = xseg_get_target(xsegbd_dev->xseg, xreq);
504 	strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
505 	xreq->size = datalen;
/* Arm the wait before submitting so the reply signal cannot be missed. */
509 	xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
510 	p = xseg_submit(xsegbd_dev->xseg, xreq,
511 			xsegbd_dev->src_portno, X_ALLOC);
513 		XSEGLOG("couldn't submit request");
517 	WARN_ON(xseg_signal(xsegbd_dev->xseg, p) < 0);
518 	XSEGLOG("Before wait for completion, xreq %lx", (unsigned long) xreq);
519 	wait_for_completion_interruptible(&comp);
520 	XSEGLOG("Woken up after wait_for_completion_interruptible(), xreq: %lx", (unsigned long) xreq);
521 	ret = update_dev_sectors_from_request(xsegbd_dev, xreq);
522 	//XSEGLOG("get_size: sectors = %ld\n", (long)xsegbd_dev->sectors);
524 	BUG_ON(xseg_put_request(xsegbd_dev->xseg, xreq, xsegbd_dev->src_portno) == -1);
/* Return the pending slot on the error path (labels not visible in this chunk). */
528 	xq_append_head(&xsegbd_dev->blk_queue_pending, blkreq_idx, 1);
/*
 * xseg_callback - completion handler invoked when the peer signals @portno.
 * Receives finished xseg requests in a loop: synchronous waiters (pending
 * ->comp set) are woken via complete() and keep ownership of the request;
 * block I/O requests have their data copied back (reads), are ended with
 * the appropriate error status, and their xseg request and pending slot
 * are recycled. Finally the request function is kicked under rqlock to
 * drain anything that queued up meanwhile.
 */
533 static void xseg_callback(xport portno)
535 	struct xsegbd_device *xsegbd_dev;
536 	struct xseg_request *xreq;
537 	struct request *blkreq;
538 	struct xsegbd_pending *pending;
540 	xqindex blkreq_idx, ridx;
544 	xsegbd_dev  = __xsegbd_get_dev(portno);
546 		XSEGLOG("portno: %u has no xsegbd device assigned", portno);
/* Re-arm the wait before each receive to avoid losing a signal race. */
552 		xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
553 		xreq = xseg_receive(xsegbd_dev->xseg, portno);
557 		xseg_cancel_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
/* Recover the pending-slot index stashed in xreq->priv at submit time. */
559 		blkreq_idx = (xqindex) xreq->priv;
560 		if (blkreq_idx >= xsegbd_dev->nr_requests) {
562 			//FIXME maybe put request?
566 		pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
568 			/* someone is blocking on this request
569 			   and will handle it when we wake them up. */
570 			complete(pending->comp);
571 			/* the request is blocker's responsibility so
572 			   we will not put_request(); */
576 		/* this is now treated as a block I/O request to end */
577 		blkreq = pending->request;
578 		pending->request = NULL;
579 		if (xsegbd_dev != pending->dev) {
580 			//FIXME maybe put request?
581 			XSEGLOG("xsegbd_dev != pending->dev");
587 			//FIXME maybe put request?
588 			XSEGLOG("blkreq does not exist");
/* Error determination lines partially missing; SERVED + full service == success. */
594 		if (!(xreq->state & XS_SERVED))
597 		if (xreq->serviced != blk_rq_bytes(blkreq))
601 		/* unlock for data transfer? */
602 		if (!rq_data_dir(blkreq)){
603 			xseg_to_blk(xsegbd_dev->xseg, xreq, blkreq);
606 		blk_end_request_all(blkreq, err);
/* Recycle the pending slot; failure to append would leak a slot forever. */
608 		ridx = xq_append_head(&xsegbd_dev->blk_queue_pending,
609 					blkreq_idx, xsegbd_dev->src_portno);
610 		if (ridx == Noneidx) {
611 			XSEGLOG("couldnt append blkreq_idx");
615 		if (xseg_put_request(xsegbd_dev->xseg, xreq,
616 					xsegbd_dev->src_portno) < 0){
617 			XSEGLOG("couldn't put req");
/* Kick the queue so freed slots/requests get consumed immediately. */
623 	spin_lock_irqsave(&xsegbd_dev->rqlock, flags);
624 	xseg_request_fn(xsegbd_dev->blk_queue);
625 	spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
630 /* sysfs interface */
/* Bus type for /sys/bus/<name>; field initializers (name, attrs) not visible in this chunk. */
632 static struct bus_type xsegbd_bus_type = {
/* sysfs 'size': device capacity in bytes (sectors * 512). */
636 static ssize_t xsegbd_size_show(struct device *dev,
637 					struct device_attribute *attr, char *buf)
639 	struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
641 	return sprintf(buf, "%llu\n", (unsigned long long) xsegbd_dev->sectors * 512ULL);
/* sysfs 'major': registered block major number. */
644 static ssize_t xsegbd_major_show(struct device *dev,
645 					struct device_attribute *attr, char *buf)
647 	struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
649 	return sprintf(buf, "%d\n", xsegbd_dev->major);
/* sysfs 'srcport': xseg source port this device is bound to. */
652 static ssize_t xsegbd_srcport_show(struct device *dev,
653 					struct device_attribute *attr, char *buf)
655 	struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
657 	return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->src_portno);
/* sysfs 'dstport': xseg destination (peer) port. */
660 static ssize_t xsegbd_dstport_show(struct device *dev,
661 					struct device_attribute *attr, char *buf)
663 	struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
665 	return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->dst_portno);
/* sysfs 'id': device id (set equal to src_portno in xsegbd_add()). */
668 static ssize_t xsegbd_id_show(struct device *dev,
669 					struct device_attribute *attr, char *buf)
671 	struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
673 	return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->id);
/* sysfs 'reqs': number of in-flight request slots configured at add time. */
676 static ssize_t xsegbd_reqs_show(struct device *dev,
677 					struct device_attribute *attr, char *buf)
679 	struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
681 	return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->nr_requests);
/* sysfs 'target': backing target name as parsed from the add string. */
684 static ssize_t xsegbd_target_show(struct device *dev,
685 					struct device_attribute *attr, char *buf)
687 	struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
689 	return sprintf(buf, "%s\n", xsegbd_dev->target);
/*
 * sysfs 'refresh' (write-only): re-query the peer for the target size and
 * update the gendisk capacity. Serialized against add/remove via
 * xsegbd_mutex (nested class because add/remove also take it).
 */
692 static ssize_t xsegbd_image_refresh(struct device *dev,
693 					struct device_attribute *attr,
697 	struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
700 	mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
702 	rc = xsegbd_get_size(xsegbd_dev);
708 	set_capacity(xsegbd_dev->gd, xsegbd_dev->sectors);
711 	mutex_unlock(&xsegbd_mutex);
/*
 * sysfs 'cleanup' (write-only): forcibly fail every outstanding request.
 * Walks all pending slots under the pending-queue xlock; any slot still in
 * flight (__xq_check reports it absent from the free queue) has its block
 * request ended with -EIO and its completion fired (completion line not
 * visible in this chunk). Intended as a recovery tool when the peer died.
 */
715 static ssize_t xsegbd_cleanup(struct device *dev,
716 					struct device_attribute *attr,
720 	struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
722 	struct request *blkreq = NULL;
723 	struct xsegbd_pending *pending = NULL;
724 	struct completion *comp = NULL;
726 	mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
727 	for (i = 0; i < xsegbd_dev->nr_requests; i++) {
728 		xlock_acquire(&xsegbd_dev->blk_queue_pending.lock,
729 				xsegbd_dev->src_portno);
/* Slot not on the free queue => request still outstanding: claim and fail it. */
730 		if (!__xq_check(&xsegbd_dev->blk_queue_pending, i)) {
731 			pending = &xsegbd_dev->blk_req_pending[i];
732 			blkreq = pending->request;
733 			pending->request = NULL;
734 			comp = pending->comp;
735 			pending->comp = NULL;
737 				blk_end_request_all(blkreq, -EIO);
741 		xlock_release(&xsegbd_dev->blk_queue_pending.lock);
744 	mutex_unlock(&xsegbd_mutex);
/* Per-device sysfs attributes: read-only introspection plus two write-only actions. */
748 static DEVICE_ATTR(size, S_IRUGO, xsegbd_size_show, NULL);
749 static DEVICE_ATTR(major, S_IRUGO, xsegbd_major_show, NULL);
750 static DEVICE_ATTR(srcport, S_IRUGO, xsegbd_srcport_show, NULL);
751 static DEVICE_ATTR(dstport, S_IRUGO, xsegbd_dstport_show, NULL);
752 static DEVICE_ATTR(id , S_IRUGO, xsegbd_id_show, NULL);
753 static DEVICE_ATTR(reqs , S_IRUGO, xsegbd_reqs_show, NULL);
754 static DEVICE_ATTR(target, S_IRUGO, xsegbd_target_show, NULL);
755 static DEVICE_ATTR(refresh , S_IWUSR, NULL, xsegbd_image_refresh);
756 static DEVICE_ATTR(cleanup , S_IWUSR, NULL, xsegbd_cleanup);
/* Attribute list/group wiring; some entries (size, id, reqs, terminators) not visible in this chunk. */
758 static struct attribute *xsegbd_attrs[] = {
760 	&dev_attr_major.attr,
761 	&dev_attr_srcport.attr,
762 	&dev_attr_dstport.attr,
765 	&dev_attr_target.attr,
766 	&dev_attr_refresh.attr,
767 	&dev_attr_cleanup.attr,
771 static struct attribute_group xsegbd_attr_group = {
772 	.attrs = xsegbd_attrs,
/* Empty release for the sysfs-only device_type object (real teardown is xsegbd_dev_release). */
780 static void xsegbd_sysfs_dev_release(struct device *dev)
784 static struct device_type xsegbd_device_type = {
786 	.groups		= xsegbd_attr_groups,
787 	.release	= xsegbd_sysfs_dev_release,
/* Root device anchoring all xsegbd devices under /sys/devices/xsegbd. */
790 static void xsegbd_root_dev_release(struct device *dev)
794 static struct device xsegbd_root_dev = {
795 	.init_name	= "xsegbd",
796 	.release	= xsegbd_root_dev_release,
/*
 * xsegbd_bus_add_dev - register one xsegbd device with the driver core.
 * Fills in bus/type/parent/release and names the device after its id,
 * under xsegbd_mutex. Returns the device_register() result.
 */
799 static int xsegbd_bus_add_dev(struct xsegbd_device *xsegbd_dev)
804 	mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
805 	dev = &xsegbd_dev->dev;
807 	dev->bus = &xsegbd_bus_type;
808 	dev->type = &xsegbd_device_type;
809 	dev->parent = &xsegbd_root_dev;
/* xsegbd_dev_release() runs on the final put_device() and frees everything. */
810 	dev->release = xsegbd_dev_release;
811 	dev_set_name(dev, "%d", xsegbd_dev->id);
813 	ret = device_register(dev);
815 	mutex_unlock(&xsegbd_mutex);
/* Unregister from the driver core; drops the initial ref, eventually firing xsegbd_dev_release(). */
819 static void xsegbd_bus_del_dev(struct xsegbd_device *xsegbd_dev)
821 	device_unregister(&xsegbd_dev->dev);
/*
 * xsegbd_add - sysfs bus 'add' store: create a device from a spec string.
 * Input format: "<target> <srcport>:<dstport>[:<nr_requests>]".
 * Pins the module, allocates the device, claims the source-port slot,
 * registers a block major, registers with the driver core, allocates the
 * pending-request bookkeeping, joins the segment, binds the port, and
 * finally initializes queue/disk via xsegbd_dev_init(). Unwind labels
 * mirror the setup order (several lines not visible in this chunk).
 */
824 static ssize_t xsegbd_add(struct bus_type *bus, const char *buf, size_t count)
826 	struct xsegbd_device *xsegbd_dev;
827 	struct xseg_port *port;
828 	ssize_t ret = -ENOMEM;
/* Hold the module alive until xsegbd_dev_release() runs module_put(). */
830 	if (!try_module_get(THIS_MODULE))
833 	xsegbd_dev = kzalloc(sizeof(*xsegbd_dev), GFP_KERNEL);
837 	spin_lock_init(&xsegbd_dev->rqlock);
838 	INIT_LIST_HEAD(&xsegbd_dev->node);
/* Width-limited %s prevents overflowing target[]; nr_requests is optional (>= 3 fields). */
841 	if (sscanf(buf, "%" __stringify(XSEGBD_TARGET_NAMELEN) "s "
842 			"%d:%d:%d", xsegbd_dev->target, &xsegbd_dev->src_portno,
843 			&xsegbd_dev->dst_portno, &xsegbd_dev->nr_requests) < 3) {
847 	xsegbd_dev->targetlen = strlen(xsegbd_dev->target);
/* Claim the per-port slot; a port can back at most one device. */
849 	spin_lock(&xsegbd_devices_lock);
850 	if (xsegbd_devices[xsegbd_dev->src_portno] != NULL) {
854 	xsegbd_devices[xsegbd_dev->src_portno] = xsegbd_dev;
855 	xsegbd_dev->id = xsegbd_dev->src_portno;
856 	spin_unlock(&xsegbd_devices_lock);
858 	XSEGLOG("registering block device major %d", major);
859 	ret = register_blkdev(major, XSEGBD_NAME);
861 		XSEGLOG("cannot register block device!");
/* register_blkdev(0, ...) returns a dynamically allocated major. */
865 	xsegbd_dev->major = ret;
866 	XSEGLOG("registered block device major %d", xsegbd_dev->major);
868 	ret = xsegbd_bus_add_dev(xsegbd_dev);
/* Free-index queue: one slot per allowed in-flight request. */
872 	if (!xq_alloc_seq(&xsegbd_dev->blk_queue_pending,
873 				xsegbd_dev->nr_requests,
874 				xsegbd_dev->nr_requests))
/* NOTE(review): unchecked multiply -- kcalloc(nr_requests, sizeof(...), ...) would
 * add overflow checking; nr_requests comes from user-controlled sysfs input. */
877 	xsegbd_dev->blk_req_pending = kzalloc(
878 			xsegbd_dev->nr_requests *sizeof(struct xsegbd_pending),
880 	if (!xsegbd_dev->blk_req_pending)
884 	XSEGLOG("joining segment");
885 	//FIXME use xsebd module config for now
886 	xsegbd_dev->xseg = xseg_join(	xsegbd.config.type,
890 	if (!xsegbd_dev->xseg)
891 		goto out_freepending;
894 	XSEGLOG("binding to source port %u (destination %u)",
895 			xsegbd_dev->src_portno, xsegbd_dev->dst_portno);
896 	port = xseg_bind_port(xsegbd_dev->xseg, xsegbd_dev->src_portno);
898 		XSEGLOG("cannot bind to port");
/* Paranoia: the port we got must actually be the one we asked for. */
904 	if (xsegbd_dev->src_portno != xseg_portno(xsegbd_dev->xseg, port)) {
905 		XSEGLOG("portno != xsegbd_dev->src_portno");
911 	/* make sure we don't get any requests until we're ready to handle them */
912 	xseg_cancel_wait(xsegbd_dev->xseg, xseg_portno(xsegbd_dev->xseg, port));
914 	ret = xsegbd_dev_init(xsegbd_dev);
/* Ready: start listening for peer signals on our port. */
918 	xseg_prepare_wait(xsegbd_dev->xseg, xseg_portno(xsegbd_dev->xseg, port));
/* Error unwind, reverse order of setup (labels not visible in this chunk). */
922 	xseg_leave(xsegbd_dev->xseg);
925 	kfree(xsegbd_dev->blk_req_pending);
928 	xq_free(&xsegbd_dev->blk_queue_pending);
931 	xsegbd_bus_del_dev(xsegbd_dev);
935 	unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
938 	spin_lock(&xsegbd_devices_lock);
939 	xsegbd_devices[xsegbd_dev->src_portno] = NULL;
942 	spin_unlock(&xsegbd_devices_lock);
/*
 * xsegbd_remove - sysfs bus 'remove' store: tear down the device whose id
 * (== source port) was written to the attribute. Looks the device up and
 * unregisters it under xsegbd_mutex; actual freeing happens later in
 * xsegbd_dev_release() once the last reference drops.
 * NOTE(review): strict_strtoul() was deprecated in favor of kstrtoul() --
 * candidate for a kernel-version-dependent cleanup.
 */
951 static ssize_t xsegbd_remove(struct bus_type *bus, const char *buf, size_t count)
953 	struct xsegbd_device *xsegbd_dev = NULL;
957 	ret = strict_strtoul(buf, 10, &ul_id);
965 	mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
968 	xsegbd_dev = __xsegbd_get_dev(id);
973 	xsegbd_bus_del_dev(xsegbd_dev);
976 	mutex_unlock(&xsegbd_mutex);
/* Bus-level write-only attributes: echo a spec to 'add', an id to 'remove'. */
980 static struct bus_attribute xsegbd_bus_attrs[] = {
981 	__ATTR(add, S_IWUSR, NULL, xsegbd_add),
982 	__ATTR(remove, S_IWUSR, NULL, xsegbd_remove),
/*
 * xsegbd_sysfs_init - register the root device and the xsegbd bus.
 * On bus_register() failure the root device is unregistered again so the
 * caller sees an all-or-nothing result.
 */
986 static int xsegbd_sysfs_init(void)
990 	ret = device_register(&xsegbd_root_dev);
994 	xsegbd_bus_type.bus_attrs = xsegbd_bus_attrs;
995 	ret = bus_register(&xsegbd_bus_type);
997 		device_unregister(&xsegbd_root_dev);
/* Reverse of xsegbd_sysfs_init(): bus first, then the root device. */
1002 static void xsegbd_sysfs_cleanup(void)
1004 	bus_unregister(&xsegbd_bus_type);
1005 	device_unregister(&xsegbd_root_dev);
1008 /* *************************** */
1009 /* ** Module Initialization ** */
1010 /* *************************** */
/*
 * xsegbd_init - module entry point: allocate the port->device table, then
 * bring up the xseg layer and the sysfs interface.
 * NOTE(review): 'struct xsegbd_devices *' looks like a typo for
 * 'struct xsegbd_device *' -- harmless only because sizeof any pointer is
 * identical, but worth fixing; kcalloc(max_dev, sizeof(...)) would also
 * add overflow checking on max_dev.
 */
1012 static int __init xsegbd_init(void)
1015 	xsegbd_devices = kzalloc(max_dev * sizeof(struct xsegbd_devices *), GFP_KERNEL);
1016 	if (!xsegbd_devices)
1019 	spin_lock_init(&xsegbd_devices_lock);
1022 	ret = xsegbd_xseg_init();
1026 	ret = xsegbd_sysfs_init();
1030 	XSEGLOG("initialization complete");
/* Error unwind (labels not visible in this chunk). */
1039 	kfree(xsegbd_devices);
/*
 * xsegbd_exit - module exit: tear down sysfs (remaining teardown lines,
 * e.g. the xseg quit and table free, are not visible in this chunk).
 */
1044 static void __exit xsegbd_exit(void)
1046 	xsegbd_sysfs_cleanup();
1050 module_init(xsegbd_init);
1051 module_exit(xsegbd_exit);