#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/device.h>
#include <linux/completion.h>

#include <sys/kernel/segdev.h>
#include "xsegbd.h"	/* struct xsegbd_device, struct xsegbd, xq helpers */
#define XSEGBD_MINORS 1

MODULE_DESCRIPTION("xsegbd");
MODULE_AUTHOR("XSEG");
MODULE_LICENSE("GPL");
static long sector_size = 0;
static long blksize = 512;
static int major = 0;
static int max_nr_pending = 1024;
static char name[XSEGBD_SEGMENT_NAMELEN] = "xsegbd";
static char spec[256] = "segdev:xsegbd:4:512:64:1024:12";

module_param(sector_size, long, 0644);
module_param(blksize, long, 0644);
module_param(max_nr_pending, int, 0644);
module_param(major, int, 0644);
module_param_string(name, name, sizeof(name), 0644);
module_param_string(spec, spec, sizeof(spec), 0644);
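
/*
 * Illustrative usage (values are examples, not recommendations):
 *
 *   insmod xsegbd.ko name=xsegbd spec="segdev:xsegbd:4:512:64:1024:12" \
 *          blksize=4096 sector_size=0
 *
 * A non-zero sector_size skips the size query towards the peer and
 * forces the reported disk size; leaving major at 0 makes
 * register_blkdev() allocate a major number dynamically.
 */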
struct pending {
        struct request *request;
        struct completion *comp;
        struct xsegbd_device *dev;
};

static struct xq blk_queue_pending;
static struct pending *blk_req_pending;
static unsigned int nr_pending;
static spinlock_t __lock;
static struct xsegbd xsegbd;
static DEFINE_MUTEX(xsegbd_mutex);
static LIST_HEAD(xsegbd_dev_list);
static DEFINE_SPINLOCK(xsegbd_dev_list_lock);
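
/*
 * Bookkeeping for in-flight requests: blk_queue_pending holds the free
 * slot indices, and blk_req_pending[] maps a slot back to the
 * originating block request (or to a completion, for synchronous
 * control requests). The slot index travels to the peer and back
 * inside xreq->priv.
 */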
/* ************************* */
/* ***** sysfs helpers ***** */
/* ************************* */

static struct xsegbd_device *dev_to_xsegbd(struct device *dev)
{
        return container_of(dev, struct xsegbd_device, dev);
}

static struct device *xsegbd_get_dev(struct xsegbd_device *xsegbd_dev)
{
        return get_device(&xsegbd_dev->dev);
}

static void xsegbd_put_dev(struct xsegbd_device *xsegbd_dev)
{
        put_device(&xsegbd_dev->dev);
}
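
/*
 * Note: get_device()/put_device() pin the embedded struct device, so a
 * device that is still open cannot go away while I/O may still arrive;
 * the final put is what triggers xsegbd_dev_release() below.
 */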
/* ************************* */
/* ** XSEG Initialization ** */
/* ************************* */

static void xseg_callback(struct xseg *xseg, uint32_t portno);

int xsegbd_xseg_init(void)
{
        int r;

        strncpy(xsegbd.name, name, XSEGBD_SEGMENT_NAMELEN);

        r = xseg_initialize();
        if (r) {
                XSEGLOG("cannot initialize 'segdev' peer");
                goto err;
        }

        r = xseg_parse_spec(spec, &xsegbd.config);
        if (r)
                goto err;

        if (strncmp(xsegbd.config.type, "segdev", 16))
                XSEGLOG("WARNING: unexpected segment type '%s' vs 'segdev'",
                        xsegbd.config.type);

        XSEGLOG("joining segment");
        xsegbd.xseg = xseg_join(xsegbd.config.type,
                                xsegbd.config.name,
                                "segdev",
                                xseg_callback);
        if (!xsegbd.xseg) {
                XSEGLOG("cannot find segment");
                r = -ENODEV;
                goto err;
        }

        return 0;
err:
        return r;
}
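
/*
 * The 'spec' string is parsed into xsegbd.config; besides the segment
 * type and name, it carries the segment geometry that the queue setup
 * below relies on (config.request_size, config.page_shift).
 */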
int xsegbd_xseg_quit(void)
{
        struct segdev *segdev;

        /* make sure to unmap the segment first */
        segdev = segdev_get(0);
        clear_bit(SEGDEV_RESERVED, &segdev->flags);
        xsegbd.xseg->priv->segment_type.ops.unmap(xsegbd.xseg, xsegbd.xseg->segment_size);
        segdev_put(segdev);

        return 0;
}
/* ***************************** */
/* ** Block Device Operations ** */
/* ***************************** */

static int xsegbd_open(struct block_device *bdev, fmode_t mode)
{
        struct gendisk *disk = bdev->bd_disk;
        struct xsegbd_device *xsegbd_dev = disk->private_data;

        xsegbd_get_dev(xsegbd_dev);

        return 0;
}

static int xsegbd_release(struct gendisk *gd, fmode_t mode)
{
        struct xsegbd_device *xsegbd_dev = gd->private_data;

        xsegbd_put_dev(xsegbd_dev);

        return 0;
}

static int xsegbd_ioctl(struct block_device *bdev, fmode_t mode,
                        unsigned int cmd, unsigned long arg)
{
        return -ENOTTY;
}

static const struct block_device_operations xsegbd_ops = {
        .owner          = THIS_MODULE,
        .open           = xsegbd_open,
        .release        = xsegbd_release,
        .ioctl          = xsegbd_ioctl
};
/* *************************** */
/* ** Device Initialization ** */
/* *************************** */

static void xseg_request_fn(struct request_queue *rq);
static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev);

static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
{
        int ret = -ENOMEM;
        struct gendisk *disk;
        unsigned int max_request_size_bytes;

        spin_lock_init(&xsegbd_dev->lock);

        xsegbd_dev->xsegbd = &xsegbd;

        xsegbd_dev->blk_queue = blk_alloc_queue(GFP_KERNEL);
        if (!xsegbd_dev->blk_queue)
                goto out;

        blk_init_allocated_queue(xsegbd_dev->blk_queue, xseg_request_fn, &xsegbd_dev->lock);
        xsegbd_dev->blk_queue->queuedata = xsegbd_dev;

        blk_queue_flush(xsegbd_dev->blk_queue, REQ_FLUSH | REQ_FUA);
        blk_queue_logical_block_size(xsegbd_dev->blk_queue, 512);
        blk_queue_physical_block_size(xsegbd_dev->blk_queue, blksize);
        blk_queue_bounce_limit(xsegbd_dev->blk_queue, BLK_BOUNCE_ANY);

        //blk_queue_max_segments(dev->blk_queue, 512);
        /* calculate the maximum block request size:
         * request size in pages * page size,
         * leaving one page in the buffer for the target name
         */
        max_request_size_bytes =
                (unsigned int) (xsegbd.config.request_size - 1) *
                (1 << xsegbd.config.page_shift);
        blk_queue_max_hw_sectors(xsegbd_dev->blk_queue, max_request_size_bytes >> 9);
        blk_queue_max_segment_size(xsegbd_dev->blk_queue, max_request_size_bytes);
        blk_queue_io_min(xsegbd_dev->blk_queue, max_request_size_bytes);
        blk_queue_io_opt(xsegbd_dev->blk_queue, max_request_size_bytes);

        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xsegbd_dev->blk_queue);

        /* vkoukis says we don't need partitions */
        xsegbd_dev->gd = disk = alloc_disk(1);
        if (!disk)
                goto out;

        disk->major = xsegbd_dev->major;
        disk->first_minor = 0; // id * XSEGBD_MINORS;
        disk->fops = &xsegbd_ops;
        disk->queue = xsegbd_dev->blk_queue;
        disk->private_data = xsegbd_dev;
        disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
        snprintf(disk->disk_name, 32, "xsegbd%u", xsegbd_dev->id);

        ret = 0;
        spin_lock_irq(&__lock);
        if (nr_pending + xsegbd_dev->nr_requests > max_nr_pending)
                ret = -ENOBUFS;
        else
                nr_pending += xsegbd_dev->nr_requests;
        spin_unlock_irq(&__lock);

        if (ret)
                goto out;

        /* allow a non-zero sector_size parameter to override the disk size */
        if (sector_size)
                xsegbd_dev->sectors = sector_size;
        else {
                ret = xsegbd_get_size(xsegbd_dev);
                if (ret)
                        goto out;
        }

        set_capacity(disk, xsegbd_dev->sectors);
        XSEGLOG("xsegbd active...");
        add_disk(disk); /* immediately activates the device */

out:
        return ret;
}
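
/*
 * Worked example, assuming config.request_size = 64 pages and
 * config.page_shift = 12 (plausibly the '64' and '12' fields of the
 * default spec above): max_request_size_bytes = (64 - 1) * 4096 =
 * 258048 bytes, so blk_queue_max_hw_sectors() is set to
 * 258048 >> 9 = 504 sectors.
 */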
static void xsegbd_dev_release(struct device *dev)
{
        struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
        struct xseg_port *port;

        /* cleanup gendisk and blk_queue the right way */
        if (xsegbd_dev->gd) {
                if (xsegbd_dev->gd->flags & GENHD_FL_UP)
                        del_gendisk(xsegbd_dev->gd);

                blk_cleanup_queue(xsegbd_dev->blk_queue);
                put_disk(xsegbd_dev->gd);
        }

        /* reset the port's waitcue (aka cancel_wait) */
        port = &xsegbd.xseg->ports[xsegbd_dev->src_portno];
        port->waitcue = (long) NULL;

        xseg_free_requests(xsegbd.xseg, xsegbd_dev->src_portno, xsegbd_dev->nr_requests);

        WARN_ON(nr_pending < xsegbd_dev->nr_requests);
        spin_lock_irq(&__lock);
        nr_pending -= xsegbd_dev->nr_requests;
        spin_unlock_irq(&__lock);

        unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);

        spin_lock(&xsegbd_dev_list_lock);
        list_del_init(&xsegbd_dev->node);
        spin_unlock(&xsegbd_dev_list_lock);

        kfree(xsegbd_dev);

        module_put(THIS_MODULE);
}
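
/*
 * Teardown order matters here: del_gendisk() stops new I/O before the
 * queue is destroyed, the waitcue reset stops peer wakeups before the
 * preallocated requests are returned, and the pending-slot budget is
 * only given back once no request can still be in flight.
 */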
/* ******************* */
/* ** Critical Path ** */
/* ******************* */

static void blk_to_xseg(struct xseg *xseg, struct xseg_request *xreq,
                        struct request *blkreq)
{
        struct bio_vec *bvec;
        struct req_iterator iter;
        uint64_t off = 0;
        char *data = XSEG_TAKE_PTR(xreq->data, xseg->segment);
        rq_for_each_segment(bvec, blkreq, iter) {
                char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
                memcpy(data + off, bdata, bvec->bv_len);
                off += bvec->bv_len;
                kunmap_atomic(bdata);
        }
}

static void xseg_to_blk(struct xseg *xseg, struct xseg_request *xreq,
                        struct request *blkreq)
{
        struct bio_vec *bvec;
        struct req_iterator iter;
        uint64_t off = 0;
        char *data = XSEG_TAKE_PTR(xreq->data, xseg->segment);
        rq_for_each_segment(bvec, blkreq, iter) {
                char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
                memcpy(bdata, data + off, bvec->bv_len);
                off += bvec->bv_len;
                kunmap_atomic(bdata);
        }
}
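
/*
 * The bio pages may live in highmem, hence the kmap_atomic() per
 * segment; kunmap_atomic() tolerates the bv_offset baked into bdata,
 * since it resolves the mapping from the page-aligned address.
 */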
static void xseg_request_fn(struct request_queue *rq)
{
        struct xseg_request *xreq;
        struct xsegbd_device *xsegbd_dev = rq->queuedata;
        struct request *blkreq;
        struct pending *pending;
        xqindex blkreq_idx;
        char *name;
        uint64_t datasize;

        for (;;) {
                xreq = xseg_get_request(xsegbd.xseg, xsegbd_dev->src_portno);
                if (!xreq)
                        break;

                blkreq = blk_fetch_request(rq);
                if (!blkreq)
                        break;

                if (blkreq->cmd_type != REQ_TYPE_FS) {
                        XSEGLOG("non-fs cmd_type: %u. *shrug*", blkreq->cmd_type);
                        __blk_end_request_all(blkreq, 0);
                }

                datasize = blk_rq_bytes(blkreq);
                BUG_ON(xreq->buffersize - xsegbd_dev->namesize < datasize);
                BUG_ON(xseg_prep_request(xreq, xsegbd_dev->namesize, datasize));

                name = XSEG_TAKE_PTR(xreq->name, xsegbd.xseg->segment);
                strncpy(name, xsegbd_dev->name, xsegbd_dev->namesize);
                blkreq_idx = xq_pop_head(&blk_queue_pending);
                BUG_ON(blkreq_idx == None);
                pending = &blk_req_pending[blkreq_idx];
                pending->dev = xsegbd_dev;
                pending->request = blkreq;
                pending->comp = NULL;
                xreq->priv = (uint64_t)blkreq_idx;
                xreq->size = datasize;
                xreq->offset = blk_rq_pos(blkreq) << 9;

                if (xreq->offset >= (sector_size << 9))
                        XSEGLOG("sector offset: %lu > %lu, flush:%u, fua:%u",
                                blk_rq_pos(blkreq), sector_size,
                                blkreq->cmd_flags & REQ_FLUSH,
                                blkreq->cmd_flags & REQ_FUA);

                if (blkreq->cmd_flags & REQ_FLUSH)
                        xreq->flags |= XF_FLUSH;

                if (blkreq->cmd_flags & REQ_FUA)
                        xreq->flags |= XF_FUA;

                if (rq_data_dir(blkreq)) {
                        /* unlock for data transfers? */
                        blk_to_xseg(xsegbd.xseg, xreq, blkreq);
                        xreq->op = X_WRITE;
                } else {
                        xreq->op = X_READ;
                }

                BUG_ON(xseg_submit(xsegbd.xseg, xsegbd_dev->dst_portno, xreq) == NoSerial);
        }

        /* This is going to happen at least once.
         * Add a WARN_ON when debugging to find out why it happens
         * more than once. */
        xseg_signal(xsegbd_dev->xsegbd->xseg, xsegbd_dev->dst_portno);
        if (xreq)
                xseg_put_request(xsegbd_dev->xsegbd->xseg, xsegbd_dev->src_portno, xreq);
}
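
/*
 * Request/response matching: the pending-slot index stored in
 * xreq->priv is the only state that travels with the request; the
 * callback below uses it to find the original struct request, so peers
 * must leave ->priv untouched (see the range check in xseg_callback()).
 */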
int update_dev_sectors_from_request(struct xsegbd_device *xsegbd_dev,
                                    struct xseg_request *xreq)
{
        void *data;

        if (xreq->state & XS_FAILED)
                return -ENOENT;

        if (!(xreq->state & XS_SERVED))
                return -EIO;

        data = XSEG_TAKE_PTR(xreq->data, xsegbd.xseg->segment);
        xsegbd_dev->sectors = *((uint64_t *) data) / 512ULL;

        return 0;
}
static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev)
{
        struct xseg_request *xreq;
        struct xseg_port *port;
        char *name;
        uint64_t datasize;
        xqindex blkreq_idx;
        struct pending *pending;
        struct completion comp;
        int ret = -EBUSY;

        xreq = xseg_get_request(xsegbd.xseg, xsegbd_dev->src_portno);
        if (!xreq)
                goto out;

        datasize = sizeof(uint64_t);
        BUG_ON(xreq->buffersize - xsegbd_dev->namesize < datasize);
        BUG_ON(xseg_prep_request(xreq, xsegbd_dev->namesize, datasize));

        init_completion(&comp);
        blkreq_idx = xq_pop_head(&blk_queue_pending);
        BUG_ON(blkreq_idx == None);
        pending = &blk_req_pending[blkreq_idx];
        pending->dev = xsegbd_dev;
        pending->request = NULL;
        pending->comp = &comp;
        xreq->priv = (uint64_t)blkreq_idx;

        name = XSEG_TAKE_PTR(xreq->name, xsegbd.xseg->segment);
        strncpy(name, xsegbd_dev->name, xsegbd_dev->namesize);
        xreq->size = datasize;
        xreq->offset = 0;
        xreq->op = X_INFO;	/* size query */

        port = &xsegbd.xseg->ports[xsegbd_dev->src_portno];
        port->waitcue = (uint64_t)(long)xsegbd_dev;

        BUG_ON(xseg_submit(xsegbd.xseg, xsegbd_dev->dst_portno, xreq) == NoSerial);
        xseg_signal(xsegbd.xseg, xsegbd_dev->dst_portno);

        wait_for_completion_interruptible(&comp);
        XSEGLOG("Woken up after wait_for_completion_interruptible()\n");
        ret = update_dev_sectors_from_request(xsegbd_dev, xreq);
        XSEGLOG("get_size: sectors = %ld\n", (long)xsegbd_dev->sectors);

        xseg_put_request(xsegbd.xseg, xsegbd_dev->src_portno, xreq);
out:
        return ret;
}
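
/*
 * Unlike the data path above, this is a synchronous control request:
 * pending->comp is set, so the callback wakes this waiter via
 * complete() instead of ending a block request, and the waiter itself
 * puts the xseg request back.
 */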
static void xseg_callback(struct xseg *xseg, uint32_t portno)
{
        struct xsegbd_device *xsegbd_dev = NULL, *old_dev = NULL;
        struct xseg_request *xreq;
        struct request *blkreq;
        struct pending *pending;
        unsigned long flags;
        uint32_t blkreq_idx;
        int err;

        for (;;) {
                xreq = xseg_receive(xseg, portno);
                if (!xreq)
                        break;

                /* we rely upon our peers to not have touched ->priv */
                blkreq_idx = (uint64_t)xreq->priv;
                if (blkreq_idx >= max_nr_pending) {
                        XSEGLOG("invalid request index: %u! Ignoring.", blkreq_idx);
                        goto xseg_put;
                }

                pending = &blk_req_pending[blkreq_idx];
                if (pending->comp) {
                        /* someone is blocking on this request
                           and will handle it when we wake them up. */
                        complete(pending->comp);
                        /* the request is the blocker's responsibility so
                           we will not put_request(); */
                        continue;
                }

                /* this is now treated as a block I/O request to end */
                blkreq = pending->request;
                pending->request = NULL;
                xsegbd_dev = pending->dev;
                pending->dev = NULL;

                if ((xsegbd_dev != old_dev) && old_dev) {
                        spin_lock_irqsave(&old_dev->lock, flags);
                        xseg_request_fn(old_dev->blk_queue);
                        spin_unlock_irqrestore(&old_dev->lock, flags);
                }

                old_dev = xsegbd_dev;

                err = -EIO;
                if (!(xreq->state & XS_SERVED))
                        goto blk_end;

                if (xreq->serviced != blk_rq_bytes(blkreq))
                        goto blk_end;

                /* unlock for data transfer? */
                if (!rq_data_dir(blkreq))
                        xseg_to_blk(xseg, xreq, blkreq);

                err = 0;
blk_end:
                blk_end_request_all(blkreq, err);
                xq_append_head(&blk_queue_pending, blkreq_idx);
xseg_put:
                xseg_put_request(xseg, xreq->portno, xreq);
        }

        if (xsegbd_dev) {
                spin_lock_irqsave(&xsegbd_dev->lock, flags);
                xseg_request_fn(xsegbd_dev->blk_queue);
                spin_unlock_irqrestore(&xsegbd_dev->lock, flags);
        }
}
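
/*
 * The old_dev dance batches queue kicks: while completions for the
 * same device stream in, its request_fn runs only when the device
 * changes and once more after the receive loop drains, rather than
 * once per completed request.
 */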
/* sysfs interface */

static struct bus_type xsegbd_bus_type = {
        .name = "xsegbd",
};
static ssize_t xsegbd_size_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);

        return sprintf(buf, "%llu\n", (unsigned long long) xsegbd_dev->sectors * 512ULL);
}

static ssize_t xsegbd_major_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);

        return sprintf(buf, "%d\n", xsegbd_dev->major);
}

static ssize_t xsegbd_srcport_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);

        return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->src_portno);
}

static ssize_t xsegbd_dstport_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);

        return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->dst_portno);
}

static ssize_t xsegbd_id_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);

        return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->id);
}

static ssize_t xsegbd_reqs_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);

        return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->nr_requests);
}

static ssize_t xsegbd_name_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);

        return sprintf(buf, "%s\n", xsegbd_dev->name);
}
static ssize_t xsegbd_image_refresh(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t size)
{
        struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
        int rc, ret = size;

        mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);

        rc = xsegbd_get_size(xsegbd_dev);
        if (rc < 0) {
                ret = rc;
                goto out;
        }

        set_capacity(xsegbd_dev->gd, xsegbd_dev->sectors);

out:
        mutex_unlock(&xsegbd_mutex);
        return ret;
}
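
/*
 * Illustrative use (the path assumes the bus is named "xsegbd" and the
 * device id is 0): re-query the target's size after it was resized,
 *
 *   echo 1 > /sys/bus/xsegbd/devices/0/refresh
 *
 * The written value is ignored; the write itself triggers the refresh.
 */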
static DEVICE_ATTR(size, S_IRUGO, xsegbd_size_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, xsegbd_major_show, NULL);
static DEVICE_ATTR(srcport, S_IRUGO, xsegbd_srcport_show, NULL);
static DEVICE_ATTR(dstport, S_IRUGO, xsegbd_dstport_show, NULL);
static DEVICE_ATTR(id, S_IRUGO, xsegbd_id_show, NULL);
static DEVICE_ATTR(reqs, S_IRUGO, xsegbd_reqs_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, xsegbd_name_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, xsegbd_image_refresh);
static struct attribute *xsegbd_attrs[] = {
        &dev_attr_size.attr,
        &dev_attr_major.attr,
        &dev_attr_srcport.attr,
        &dev_attr_dstport.attr,
        &dev_attr_id.attr,
        &dev_attr_reqs.attr,
        &dev_attr_name.attr,
        &dev_attr_refresh.attr,
        NULL
};

static struct attribute_group xsegbd_attr_group = {
        .attrs = xsegbd_attrs,
};

static const struct attribute_group *xsegbd_attr_groups[] = {
        &xsegbd_attr_group,
        NULL
};

static void xsegbd_sysfs_dev_release(struct device *dev)
{
}

static struct device_type xsegbd_device_type = {
        .name           = "xsegbd",
        .groups         = xsegbd_attr_groups,
        .release        = xsegbd_sysfs_dev_release,
};

static void xsegbd_root_dev_release(struct device *dev)
{
}

static struct device xsegbd_root_dev = {
        .init_name      = "xsegbd",
        .release        = xsegbd_root_dev_release,
};
static int xsegbd_bus_add_dev(struct xsegbd_device *xsegbd_dev)
{
        int ret;
        struct device *dev;

        mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
        dev = &xsegbd_dev->dev;

        dev->bus = &xsegbd_bus_type;
        dev->type = &xsegbd_device_type;
        dev->parent = &xsegbd_root_dev;
        dev->release = xsegbd_dev_release;
        dev_set_name(dev, "%d", xsegbd_dev->id);

        ret = device_register(dev);

        mutex_unlock(&xsegbd_mutex);
        return ret;
}

static void xsegbd_bus_del_dev(struct xsegbd_device *xsegbd_dev)
{
        device_unregister(&xsegbd_dev->dev);
}
static ssize_t xsegbd_add(struct bus_type *bus, const char *buf, size_t count)
{
        struct xsegbd_device *xsegbd_dev;
        struct xseg_port *xport;
        ssize_t ret = -ENOMEM;
        int new_id = 0;
        struct list_head *tmp;

        if (!try_module_get(THIS_MODULE))
                return -ENODEV;

        xsegbd_dev = kzalloc(sizeof(*xsegbd_dev), GFP_KERNEL);
        if (!xsegbd_dev)
                goto out;

        spin_lock_init(&xsegbd_dev->lock);
        INIT_LIST_HEAD(&xsegbd_dev->node);

        /* parse cmd */
        if (sscanf(buf, "%" __stringify(XSEGBD_TARGET_NAMELEN) "s "
                        "%d:%d:%d", xsegbd_dev->name, &xsegbd_dev->src_portno,
                        &xsegbd_dev->dst_portno, &xsegbd_dev->nr_requests) < 3) {
                ret = -EINVAL;
                goto out_dev;
        }

        xsegbd_dev->namesize = strlen(xsegbd_dev->name);

        spin_lock(&xsegbd_dev_list_lock);
        list_for_each(tmp, &xsegbd_dev_list) {
                struct xsegbd_device *entry;

                entry = list_entry(tmp, struct xsegbd_device, node);
                if (entry->src_portno == xsegbd_dev->src_portno) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
                if (entry->id >= new_id)
                        new_id = entry->id + 1;
        }
        xsegbd_dev->id = new_id;
        list_add_tail(&xsegbd_dev->node, &xsegbd_dev_list);
        spin_unlock(&xsegbd_dev_list_lock);

        XSEGLOG("registering block device major %d", major);
        ret = register_blkdev(major, XSEGBD_NAME);
        if (ret < 0) {
                XSEGLOG("cannot register block device!");
                ret = -EBUSY;
                goto out_delentry;
        }
        /* with an explicit major, register_blkdev() returns 0 on success */
        xsegbd_dev->major = ret ? ret : major;
        XSEGLOG("registered block device major %d", xsegbd_dev->major);

        ret = xsegbd_bus_add_dev(xsegbd_dev);
        if (ret)
                goto out_blkdev;

        XSEGLOG("binding to source port %u (destination %u)",
                xsegbd_dev->src_portno, xsegbd_dev->dst_portno);
        xport = xseg_bind_port(xsegbd.xseg, xsegbd_dev->src_portno);
        if (!xport) {
                XSEGLOG("cannot bind to port");
                ret = -EFAULT;
                goto out_bus;
        }
        /* make sure we don't get any requests until we're ready to handle them */
        xport->waitcue = (long) NULL;

        XSEGLOG("allocating %u requests", xsegbd_dev->nr_requests);
        if (xseg_alloc_requests(xsegbd.xseg, xsegbd_dev->src_portno, xsegbd_dev->nr_requests)) {
                XSEGLOG("cannot allocate requests");
                ret = -EFAULT;
                goto out_bus;
        }

        ret = xsegbd_dev_init(xsegbd_dev);
        if (ret)
                goto out_bus;

        return count;

out_bus:
        /* device_unregister() ends up in xsegbd_dev_release(),
         * which undoes everything done so far */
        xsegbd_bus_del_dev(xsegbd_dev);
        return ret;
out_blkdev:
        unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
out_delentry:
        spin_lock(&xsegbd_dev_list_lock);
        list_del_init(&xsegbd_dev->node);
out_unlock:
        spin_unlock(&xsegbd_dev_list_lock);
out_dev:
        kfree(xsegbd_dev);
out:
        module_put(THIS_MODULE);
        return ret;
}
static struct xsegbd_device *__xsegbd_get_dev(unsigned long id)
{
        struct list_head *tmp;
        struct xsegbd_device *xsegbd_dev;

        spin_lock(&xsegbd_dev_list_lock);
        list_for_each(tmp, &xsegbd_dev_list) {
                xsegbd_dev = list_entry(tmp, struct xsegbd_device, node);
                if (xsegbd_dev->id == id) {
                        spin_unlock(&xsegbd_dev_list_lock);
                        return xsegbd_dev;
                }
        }
        spin_unlock(&xsegbd_dev_list_lock);

        return NULL;
}
static ssize_t xsegbd_remove(struct bus_type *bus, const char *buf, size_t count)
{
        struct xsegbd_device *xsegbd_dev = NULL;
        unsigned long ul_id;
        int id, ret;

        ret = kstrtoul(buf, 10, &ul_id);
        if (ret)
                return ret;

        id = (int) ul_id;

        mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);

        ret = count;
        xsegbd_dev = __xsegbd_get_dev(id);
        if (!xsegbd_dev) {
                ret = -ENOENT;
                goto out_unlock;
        }

        xsegbd_bus_del_dev(xsegbd_dev);

out_unlock:
        mutex_unlock(&xsegbd_mutex);
        return ret;
}
static struct bus_attribute xsegbd_bus_attrs[] = {
        __ATTR(add, S_IWUSR, NULL, xsegbd_add),
        __ATTR(remove, S_IWUSR, NULL, xsegbd_remove),
        __ATTR_NULL
};
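
/*
 * Illustrative control flow through the bus attributes (the "1:2:128"
 * numbers are example src:dst:nr_requests values, not defaults):
 *
 *   echo "mytarget 1:2:128" > /sys/bus/xsegbd/add    # create xsegbd0
 *   echo 0 > /sys/bus/xsegbd/remove                  # remove device id 0
 */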
static int xsegbd_sysfs_init(void)
{
        int ret;

        ret = device_register(&xsegbd_root_dev);
        if (ret < 0)
                return ret;

        xsegbd_bus_type.bus_attrs = xsegbd_bus_attrs;
        ret = bus_register(&xsegbd_bus_type);
        if (ret < 0)
                device_unregister(&xsegbd_root_dev);

        return ret;
}

static void xsegbd_sysfs_cleanup(void)
{
        bus_unregister(&xsegbd_bus_type);
        device_unregister(&xsegbd_root_dev);
}
/* *************************** */
/* ** Module Initialization ** */
/* *************************** */

static int __init xsegbd_init(void)
{
        int ret = -ENOMEM;

        if (!xq_alloc_seq(&blk_queue_pending, max_nr_pending, max_nr_pending))
                goto out;

        blk_req_pending = kzalloc(sizeof(struct pending) * max_nr_pending, GFP_KERNEL);
        if (!blk_req_pending)
                goto out_queue;

        ret = xsegbd_xseg_init();
        if (ret)
                goto out_pending;

        ret = xsegbd_sysfs_init();
        if (ret)
                goto out_xseg;

        XSEGLOG("initialization complete");

out:
        return ret;

out_xseg:
        xsegbd_xseg_quit();
out_pending:
        kfree(blk_req_pending);
out_queue:
        xq_free(&blk_queue_pending);
        goto out;
}
static void __exit xsegbd_exit(void)
{
        xsegbd_sysfs_cleanup();
        xsegbd_xseg_quit();
}

module_init(xsegbd_init);
module_exit(xsegbd_exit);