5 #include <linux/module.h>
6 #include <linux/moduleparam.h>
7 #include <linux/init.h>
8 #include <linux/sched.h>
9 #include <linux/kernel.h>
10 #include <linux/slab.h>
12 #include <linux/errno.h>
13 #include <linux/timer.h>
14 #include <linux/types.h>
15 #include <linux/vmalloc.h>
16 #include <linux/genhd.h>
17 #include <linux/blkdev.h>
18 #include <linux/bio.h>
19 #include <linux/device.h>
20 #include <linux/completion.h>
22 #include <sys/kernel/segdev.h>
/* One minor per gendisk: partitions are not exposed (matches alloc_disk(1) below). */
25 #define XSEGBD_MINORS 1
26 /* define max request size to be used in xsegbd */
27 //FIXME should we make this 4MB instead of 256KB ?
28 #define XSEGBD_MAX_REQUEST_SIZE 262144U
30 MODULE_DESCRIPTION("xsegbd");
31 MODULE_AUTHOR("XSEG");
32 MODULE_LICENSE("GPL");
/* Module parameters (mode 0644: visible and root-writable under /sys/module).
 * sector_size: when non-zero, overrides the size reported by the storage peer
 * (used as a sector count in xsegbd_dev_init). */
34 static long sector_size = 0;
/* Physical block size advertised to the block layer (see xsegbd_dev_init). */
35 static long blksize = 512;
/* Global cap on in-flight request slots shared by all xsegbd devices. */
37 static int max_nr_pending = 1024;
/* XSEG segment name and parse spec ("type:name:..." — see xseg_parse_spec). */
38 static char name[XSEGBD_SEGMENT_NAMELEN] = "xsegbd";
39 static char spec[256] = "segdev:xsegbd:4:256:12";
41 module_param(sector_size, long, 0644);
42 module_param(blksize, long, 0644);
43 module_param(max_nr_pending, int, 0644);
/* NOTE(review): 'major' is not declared anywhere in this chunk — presumably
 * defined on an elided line; confirm against the full file. */
44 module_param(major, int, 0644);
45 module_param_string(name, name, sizeof(name), 0644);
46 module_param_string(spec, spec, sizeof(spec), 0644);
/* NOTE(review): the next three declarations look like the members of a
 * 'struct pending' whose opening line is elided from this chunk (they are
 * accessed as pending->request / pending->comp / pending->dev below) —
 * confirm against the full file. */
49 struct request *request;
50 struct completion *comp;
51 struct xsegbd_device *dev;
/* Free-slot queue and slot array for in-flight requests; a slot index is
 * stashed in xreq->priv and recovered in xseg_callback(). */
54 static struct xq blk_queue_pending;
55 static struct pending *blk_req_pending;
/* Count of reserved request slots across all devices; guarded by __lock. */
56 static unsigned int nr_pending;
57 static spinlock_t __lock;
/* Single module-wide xseg handle/configuration shared by every device. */
58 static struct xsegbd xsegbd;
59 static DEFINE_MUTEX(xsegbd_mutex);
/* All registered xsegbd devices; guarded by xsegbd_dev_list_lock. */
60 static LIST_HEAD(xsegbd_dev_list);
61 static DEFINE_SPINLOCK(xsegbd_dev_list_lock);
63 /* ************************* */
64 /* ***** sysfs helpers ***** */
65 /* ************************* */
/* Map a generic struct device back to its containing xsegbd_device. */
67 static struct xsegbd_device *dev_to_xsegbd(struct device *dev)
69 	return container_of(dev, struct xsegbd_device, dev);
/* Take a reference on the embedded struct device (paired with xsegbd_put_dev). */
72 static struct device *xsegbd_get_dev(struct xsegbd_device *xsegbd_dev)
75 	return get_device(&xsegbd_dev->dev);
/* Drop the reference taken by xsegbd_get_dev(); may trigger xsegbd_dev_release. */
78 static void xsegbd_put_dev(struct xsegbd_device *xsegbd_dev)
80 	put_device(&xsegbd_dev->dev);
83 /* ************************* */
84 /* ** XSEG Initialization ** */
85 /* ************************* */
87 static void xseg_callback(struct xseg *xseg, uint32_t portno);
/* Initialize the module-wide xseg: parse the 'spec' parameter, verify the
 * segment type is "segdev", and join the shared segment.
 * NOTE(review): error-path lines (return codes, braces) are elided in this
 * chunk; the visible XSEGLOG calls mark the failure points. */
89 int xsegbd_xseg_init(void)
94 	strncpy(xsegbd.name, name, XSEGBD_SEGMENT_NAMELEN);
96 	r = xseg_initialize();
98 		XSEGLOG("cannot initialize 'segdev' peer");
102 	r = xseg_parse_spec(spec, &xsegbd.config);
	/* Only the "segdev" segment type is expected in-kernel; warn otherwise. */
106 	if (strncmp(xsegbd.config.type, "segdev", 16))
107 		XSEGLOG("WARNING: unexpected segment type '%s' vs 'segdev'",
110 	XSEGLOG("joining segment");
111 	xsegbd.xseg = xseg_join( xsegbd.config.type,
116 		XSEGLOG("cannot find segment");
/* Tear down the module-wide xseg: clear the segdev reservation and unmap
 * the shared segment via the segment type's own unmap op. */
127 int xsegbd_xseg_quit(void)
129 	struct segdev *segdev;
131 	/* make sure to unmap the segment first */
132 	segdev = segdev_get(0);
133 	clear_bit(SEGDEV_RESERVED, &segdev->flags);
134 	xsegbd.xseg->priv->segment_type.ops.unmap(xsegbd.xseg, xsegbd.xseg->segment_size);
141 /* ***************************** */
142 /* ** Block Device Operations ** */
143 /* ***************************** */
/* block_device_operations.open: pin the device for the lifetime of the open. */
145 static int xsegbd_open(struct block_device *bdev, fmode_t mode)
147 	struct gendisk *disk = bdev->bd_disk;
148 	struct xsegbd_device *xsegbd_dev = disk->private_data;
150 	xsegbd_get_dev(xsegbd_dev);
/* block_device_operations.release: drop the reference taken in xsegbd_open. */
155 static int xsegbd_release(struct gendisk *gd, fmode_t mode)
157 	struct xsegbd_device *xsegbd_dev = gd->private_data;
159 	xsegbd_put_dev(xsegbd_dev);
/* block_device_operations.ioctl — body elided in this chunk; appears to be a
 * stub (no custom ioctls are visible elsewhere in the file). */
164 static int xsegbd_ioctl(struct block_device *bdev, fmode_t mode,
165 			unsigned int cmd, unsigned long arg)
/* Block device operations table registered via gendisk->fops. */
170 static const struct block_device_operations xsegbd_ops = {
171 	.owner = THIS_MODULE,
173 	.release = xsegbd_release,
174 	.ioctl = xsegbd_ioctl
178 /* *************************** */
179 /* ** Device Initialization ** */
180 /* *************************** */
182 static void xseg_request_fn(struct request_queue *rq);
183 static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev);
/* Set up one xsegbd device: allocate and tune its request queue, allocate a
 * gendisk, reserve its share of the global pending-slot budget, determine the
 * disk size (module parameter or query to the peer), and activate the disk.
 * NOTE(review): error-path lines and braces are elided in this chunk. */
185 static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
188 	struct gendisk *disk;
189 	unsigned int max_request_size_bytes;
191 	spin_lock_init(&xsegbd_dev->lock);
193 	xsegbd_dev->xsegbd = &xsegbd;
195 	xsegbd_dev->blk_queue = blk_alloc_queue(GFP_KERNEL);
196 	if (!xsegbd_dev->blk_queue)
199 	blk_init_allocated_queue(xsegbd_dev->blk_queue, xseg_request_fn, &xsegbd_dev->lock);
200 	xsegbd_dev->blk_queue->queuedata = xsegbd_dev;
	/* Advertise FLUSH/FUA support; they are forwarded as XF_FLUSH/XF_FUA
	 * in xseg_request_fn. */
202 	blk_queue_flush(xsegbd_dev->blk_queue, REQ_FLUSH | REQ_FUA);
203 	blk_queue_logical_block_size(xsegbd_dev->blk_queue, 512);
204 	blk_queue_physical_block_size(xsegbd_dev->blk_queue, blksize);
205 	blk_queue_bounce_limit(xsegbd_dev->blk_queue, BLK_BOUNCE_ANY);
207 	//blk_queue_max_segments(dev->blk_queue, 512);
	/* Cap every request at XSEGBD_MAX_REQUEST_SIZE so it fits one xseg
	 * request buffer (>> 9 converts bytes to 512-byte sectors). */
209 	max_request_size_bytes = XSEGBD_MAX_REQUEST_SIZE;
210 	blk_queue_max_hw_sectors(xsegbd_dev->blk_queue, max_request_size_bytes >> 9);
211 	blk_queue_max_segment_size(xsegbd_dev->blk_queue, max_request_size_bytes);
212 	blk_queue_io_min(xsegbd_dev->blk_queue, max_request_size_bytes);
213 	blk_queue_io_opt(xsegbd_dev->blk_queue, max_request_size_bytes);
215 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xsegbd_dev->blk_queue);
217 	/* vkoukis says we don't need partitions */
218 	xsegbd_dev->gd = disk = alloc_disk(1);
220 	/* FIXME: We call xsegbd_dev_release if something goes wrong, to cleanup
222 	 * Would it be better to do the cleanup here, and conditionally cleanup
227 	disk->major = xsegbd_dev->major;
228 	disk->first_minor = 0; // id * XSEGBD_MINORS;
229 	disk->fops = &xsegbd_ops;
230 	disk->queue = xsegbd_dev->blk_queue;
231 	disk->private_data = xsegbd_dev;
232 	disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
233 	snprintf(disk->disk_name, 32, "xsegbd%u", xsegbd_dev->id);
	/* Reserve this device's request slots against the global cap. */
236 	spin_lock_irq(&__lock);
237 	if (nr_pending + xsegbd_dev->nr_requests > max_nr_pending)
240 	nr_pending += xsegbd_dev->nr_requests;
241 	spin_unlock_irq(&__lock);
246 	/* allow a non-zero sector_size parameter to override the disk size */
248 	xsegbd_dev->sectors = sector_size;
	/* Otherwise query the peer for the target's size (blocking call). */
250 	ret = xsegbd_get_size(xsegbd_dev);
255 	set_capacity(disk, xsegbd_dev->sectors);
256 	XSEGLOG("xsegbd active...");
257 	add_disk(disk); /* immediately activates the device */
/* device.release callback: final teardown when the last reference on the
 * embedded struct device is dropped. Unwinds everything xsegbd_dev_init and
 * xsegbd_add set up: gendisk, queue, xseg requests, the global pending-slot
 * reservation, the blkdev major, and the device-list entry. */
265 static void xsegbd_dev_release(struct device *dev)
267 	struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
269 	/* cleanup gendisk and blk_queue the right way */
270 	if (xsegbd_dev->gd) {
271 		if (xsegbd_dev->gd->flags & GENHD_FL_UP)
272 			del_gendisk(xsegbd_dev->gd);
274 		blk_cleanup_queue(xsegbd_dev->blk_queue);
275 		put_disk(xsegbd_dev->gd);
	/* xsegbd does not actually use xseg waiting; xseg_cancel_wait is kept
	 * here (commented out) for clarity — with the xseg segdev kernel
	 * driver it would be a no-op anyway. */
283 	// xseg_cancel_wait(xseg, xsegbd_dev->src_portno);
285 	if (xseg_free_requests(xsegbd.xseg,
286 			xsegbd_dev->src_portno, xsegbd_dev->nr_requests) != 0)
287 		XSEGLOG("Error trying to free requests!\n");
	/* Return this device's slots to the global budget.
	 * NOTE(review): the WARN_ON reads nr_pending before taking __lock —
	 * racy read; harmless for a diagnostic but worth confirming intent. */
289 	WARN_ON(nr_pending < xsegbd_dev->nr_requests);
290 	spin_lock_irq(&__lock);
291 	nr_pending -= xsegbd_dev->nr_requests;
292 	spin_unlock_irq(&__lock);
294 	unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
296 	spin_lock(&xsegbd_dev_list_lock);
297 	list_del_init(&xsegbd_dev->node);
298 	spin_unlock(&xsegbd_dev_list_lock);
	/* Balances the try_module_get() in xsegbd_add(). */
301 	module_put(THIS_MODULE);
304 /* ******************* */
305 /* ** Critical Path ** */
306 /* ******************* */
/* Copy the data of a WRITE block request from its bio pages into the xseg
 * request's data buffer inside the shared segment.
 * NOTE(review): the 'off' accumulator is declared/advanced on elided lines —
 * presumably incremented by bv_len per segment; confirm in the full file. */
308 static void blk_to_xseg(struct xseg *xseg, struct xseg_request *xreq,
309 			struct request *blkreq)
311 	struct bio_vec *bvec;
312 	struct req_iterator iter;
314 	char *data = XSEG_TAKE_PTR(xreq->data, xseg->segment);
315 	rq_for_each_segment(bvec, blkreq, iter) {
316 		char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
317 		memcpy(data + off, bdata, bvec->bv_len);
319 		kunmap_atomic(bdata);
/* Mirror of blk_to_xseg for READs: copy served data from the xseg request's
 * buffer in the shared segment back into the block request's bio pages.
 * NOTE(review): 'off' is declared/advanced on elided lines — confirm. */
323 static void xseg_to_blk(struct xseg *xseg, struct xseg_request *xreq,
324 			struct request *blkreq)
326 	struct bio_vec *bvec;
327 	struct req_iterator iter;
329 	char *data = XSEG_TAKE_PTR(xreq->data, xseg->segment);
330 	rq_for_each_segment(bvec, blkreq, iter) {
331 		char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
332 		memcpy(bdata, data + off, bvec->bv_len);
334 		kunmap_atomic(bdata);
/* request_fn for the device's queue (called with xsegbd_dev->lock held by the
 * block layer): drain block requests, translate each into an xseg request,
 * and submit it to the destination port. Completion is asynchronous, via
 * xseg_callback(). NOTE(review): loop structure and error-path lines are
 * elided in this chunk. */
338 static void xseg_request_fn(struct request_queue *rq)
340 	struct xseg_request *xreq;
341 	struct xsegbd_device *xsegbd_dev = rq->queuedata;
342 	struct request *blkreq;
343 	struct pending *pending;
350 		xreq = xseg_get_request(xsegbd.xseg, xsegbd_dev->src_portno,
351 				xsegbd_dev->dst_portno, X_ALLOC);
355 		blkreq = blk_fetch_request(rq);
		/* Only filesystem (REQ_TYPE_FS) requests are serviced; anything
		 * else is completed immediately with success. */
359 		if (blkreq->cmd_type != REQ_TYPE_FS) {
360 			XSEGLOG("non-fs cmd_type: %u. *shrug*", blkreq->cmd_type);
361 			__blk_end_request_all(blkreq, 0);
365 		datalen = blk_rq_bytes(blkreq);
366 		BUG_ON(xseg_prep_request(xsegbd.xseg, xreq,
367 					xsegbd_dev->targetlen, datalen));
368 		BUG_ON(xreq->bufferlen - xsegbd_dev->targetlen < datalen);
370 		target = xseg_get_target(xsegbd.xseg, xreq);
371 		strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
		/* Claim a pending slot; its index doubles as the completion
		 * cookie carried in xreq->priv. */
372 		blkreq_idx = xq_pop_head(&blk_queue_pending, 1);
373 		BUG_ON(blkreq_idx == Noneidx);
374 		pending = &blk_req_pending[blkreq_idx];
375 		pending->dev = xsegbd_dev;
376 		pending->request = blkreq;
377 		pending->comp = NULL;
378 		xreq->priv = (uint64_t)blkreq_idx;
379 		xreq->size = datalen;
		/* Convert sector position to a byte offset (<< 9). */
380 		xreq->offset = blk_rq_pos(blkreq) << 9;
		/* Diagnostic only: an offset beyond the advertised size. */
382 		if (xreq->offset >= (sector_size << 9))
383 			XSEGLOG("sector offset: %lu > %lu, flush:%u, fua:%u",
384 				blk_rq_pos(blkreq), sector_size,
385 				blkreq->cmd_flags & REQ_FLUSH,
386 				blkreq->cmd_flags & REQ_FUA);
		/* Forward FLUSH/FUA semantics to the peer. */
389 		if (blkreq->cmd_flags & REQ_FLUSH)
390 			xreq->flags |= XF_FLUSH;
392 		if (blkreq->cmd_flags & REQ_FUA)
393 			xreq->flags |= XF_FUA;
		/* rq_data_dir != 0 means WRITE: stage the payload into the
		 * shared segment before submitting. */
395 		if (rq_data_dir(blkreq)) {
396 			/* unlock for data transfers? */
397 			blk_to_xseg(xsegbd.xseg, xreq, blkreq);
403 		BUG_ON((p = xseg_submit(xsegbd.xseg, xreq,
404 					xsegbd_dev->src_portno, X_ALLOC)) == NoPort);
405 		WARN_ON(xseg_signal(xsegbd_dev->xsegbd->xseg, p) < 0);
	/* Unused xseg request (no block request fetched for it) is returned. */
408 	BUG_ON(xseg_put_request(xsegbd_dev->xsegbd->xseg, xreq,
409 				xsegbd_dev->src_portno) == NoSerial);
/* Read the 8-byte size reply from a served xseg request and store it on the
 * device as a 512-byte sector count. Fails if the request was not served. */
412 int update_dev_sectors_from_request(	struct xsegbd_device *xsegbd_dev,
413 					struct xseg_request *xreq	)
417 	if (xreq->state & XS_FAILED)
420 	if (!(xreq->state & XS_SERVED))
423 	data = XSEG_TAKE_PTR(xreq->data, xsegbd.xseg->segment);
424 	xsegbd_dev->sectors = *((uint64_t *) data) / 512ULL;
/* Synchronously ask the peer for the target's size: submit an xseg request
 * whose pending slot carries an on-stack completion (instead of a block
 * request), block until xseg_callback() completes it, then parse the reply
 * via update_dev_sectors_from_request(). */
428 static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev)
430 	struct xseg_request *xreq;
434 	struct pending *pending;
435 	struct completion comp;
439 	xreq = xseg_get_request(xsegbd.xseg, xsegbd_dev->src_portno,
440 			xsegbd_dev->dst_portno, X_ALLOC);
444 	datalen = sizeof(uint64_t);
445 	BUG_ON(xseg_prep_request(xsegbd.xseg, xreq, xsegbd_dev->targetlen, datalen));
446 	BUG_ON(xreq->bufferlen - xsegbd_dev->targetlen < datalen);
	/* pending->comp != NULL marks this slot as "a blocker is waiting":
	 * xseg_callback() will complete() it rather than end a block request. */
448 	init_completion(&comp);
449 	blkreq_idx = xq_pop_head(&blk_queue_pending, 1);
450 	BUG_ON(blkreq_idx == Noneidx);
451 	pending = &blk_req_pending[blkreq_idx];
452 	pending->dev = xsegbd_dev;
453 	pending->request = NULL;
454 	pending->comp = &comp;
455 	xreq->priv = (uint64_t)blkreq_idx;
457 	target = xseg_get_target(xsegbd.xseg, xreq);
458 	strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
459 	xreq->size = datalen;
	/* Waiting on the port is not needed here; xseg_prepare_wait with the
	 * xseg segdev kernel driver would be a no-op anyway. */
468 	// port = &xsegbd.xseg->ports[xsegbd_dev->src_portno];
469 	// port->waitcue = (uint64_t)(long)xsegbd_dev;
471 	BUG_ON((p = xseg_submit(xsegbd.xseg, xreq,
472 				xsegbd_dev->src_portno, X_ALLOC)) == NoPort);
473 	WARN_ON(xseg_signal(xsegbd.xseg, p) < 0);
475 	wait_for_completion_interruptible(&comp);
476 	XSEGLOG("Woken up after wait_for_completion_interruptible()\n");
477 	ret = update_dev_sectors_from_request(xsegbd_dev, xreq);
478 	XSEGLOG("get_size: sectors = %ld\n", (long)xsegbd_dev->sectors);
480 	BUG_ON(xseg_put_request(xsegbd.xseg, xreq, xsegbd_dev->src_portno) == NoSerial);
/* Completion path: invoked when the peer signals our port. Drains served
 * xseg requests, maps each back to its pending slot via xreq->priv, and
 * either wakes a blocked waiter (pending->comp) or ends the corresponding
 * block request and restarts the device's queue.
 * NOTE(review): loop structure, 'err' computation and several branches are
 * elided in this chunk. */
484 static void xseg_callback(struct xseg *xseg, uint32_t portno)
486 	struct xsegbd_device *xsegbd_dev = NULL, *old_dev = NULL;
487 	struct xseg_request *xreq;
488 	struct request *blkreq;
489 	struct pending *pending;
495 		xreq = xseg_receive(xseg, portno);
499 		/* we rely upon our peers to not have touched ->priv */
500 		blkreq_idx = (uint64_t)xreq->priv;
		/* An out-of-range cookie means a corrupted/foreign request. */
501 		if (blkreq_idx >= max_nr_pending) {
506 		pending = &blk_req_pending[blkreq_idx];
508 			/* someone is blocking on this request
509 			   and will handle it when we wake them up. */
510 			complete(pending->comp);
511 			/* the request is blocker's responsibility so
512 			   we will not put_request(); */
516 		/* this is now treated as a block I/O request to end */
517 		blkreq = pending->request;
518 		pending->request = NULL;
519 		xsegbd_dev = pending->dev;
		/* Completions for several devices may be interleaved on this
		 * port; kick the previous device's queue when it changes. */
523 		if ((xsegbd_dev != old_dev) && old_dev) {
524 			spin_lock_irqsave(&old_dev->lock, flags);
525 			xseg_request_fn(old_dev->blk_queue);
526 			spin_unlock_irqrestore(&old_dev->lock, flags);
529 		old_dev = xsegbd_dev;
531 		if (!(xreq->state & XS_SERVED))
		/* Short service (fewer bytes than requested) is an error too. */
534 		if (xreq->serviced != blk_rq_bytes(blkreq))
537 		/* unlock for data transfer? */
		/* rq_data_dir == 0 means READ: copy served data back out. */
538 		if (!rq_data_dir(blkreq))
539 			xseg_to_blk(xseg, xreq, blkreq);
543 		blk_end_request_all(blkreq, err);
		/* Recycle the pending slot and the xseg request. */
544 		xq_append_head(&blk_queue_pending, blkreq_idx, 1);
545 		BUG_ON(xseg_put_request(xseg, xreq, xsegbd_dev->src_portno) == NoSerial);
	/* Restart the last device's queue to push any queued requests. */
549 	spin_lock_irqsave(&xsegbd_dev->lock, flags);
550 	xseg_request_fn(xsegbd_dev->blk_queue);
551 	spin_unlock_irqrestore(&xsegbd_dev->lock, flags);
556 /* sysfs interface */
/* Bus for xsegbd devices; add/remove happen via its bus attributes below. */
558 static struct bus_type xsegbd_bus_type = {
/* sysfs 'size': device size in bytes (sector count * 512). */
562 static ssize_t xsegbd_size_show(struct device *dev,
563 					struct device_attribute *attr, char *buf)
565 	struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
567 	return sprintf(buf, "%llu\n", (unsigned long long) xsegbd_dev->sectors * 512ULL);
/* sysfs 'major': the block major number registered for this device. */
570 static ssize_t xsegbd_major_show(struct device *dev,
571 					struct device_attribute *attr, char *buf)
573 	struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
575 	return sprintf(buf, "%d\n", xsegbd_dev->major);
/* sysfs 'srcport': the xseg port this device submits from. */
578 static ssize_t xsegbd_srcport_show(struct device *dev,
579 					struct device_attribute *attr, char *buf)
581 	struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
583 	return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->src_portno);
/* sysfs 'dstport': the xseg port of the serving peer. */
586 static ssize_t xsegbd_dstport_show(struct device *dev,
587 					struct device_attribute *attr, char *buf)
589 	struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
591 	return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->dst_portno);
/* sysfs 'id': numeric device id (also the disk name suffix, "xsegbd%u"). */
594 static ssize_t xsegbd_id_show(struct device *dev,
595 					struct device_attribute *attr, char *buf)
597 	struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
599 	return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->id);
/* sysfs 'reqs': number of xseg request slots reserved for this device. */
602 static ssize_t xsegbd_reqs_show(struct device *dev,
603 					struct device_attribute *attr, char *buf)
605 	struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
607 	return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->nr_requests);
/* sysfs 'target': the backing target name this device maps. */
610 static ssize_t xsegbd_target_show(struct device *dev,
611 					struct device_attribute *attr, char *buf)
613 	struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
615 	return sprintf(buf, "%s\n", xsegbd_dev->target);
/* sysfs 'refresh' (write-only): re-query the peer for the target size and
 * update the gendisk capacity. Serialized under xsegbd_mutex. */
618 static ssize_t xsegbd_image_refresh(struct device *dev,
619 					struct device_attribute *attr,
623 	struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
626 	mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
628 	rc = xsegbd_get_size(xsegbd_dev);
634 	set_capacity(xsegbd_dev->gd, xsegbd_dev->sectors);
637 	mutex_unlock(&xsegbd_mutex);
/* Per-device sysfs attributes; all read-only except 'refresh' (write-only). */
641 static DEVICE_ATTR(size , S_IRUGO, xsegbd_size_show, NULL);
642 static DEVICE_ATTR(major , S_IRUGO, xsegbd_major_show, NULL);
643 static DEVICE_ATTR(srcport , S_IRUGO, xsegbd_srcport_show, NULL);
644 static DEVICE_ATTR(dstport , S_IRUGO, xsegbd_dstport_show, NULL);
645 static DEVICE_ATTR(id , S_IRUGO, xsegbd_id_show, NULL);
646 static DEVICE_ATTR(reqs , S_IRUGO, xsegbd_reqs_show, NULL);
647 static DEVICE_ATTR(target, S_IRUGO, xsegbd_target_show, NULL);
648 static DEVICE_ATTR(refresh , S_IWUSR, NULL, xsegbd_image_refresh);
/* Attribute array / group / device_type wiring for per-device sysfs files.
 * NOTE(review): some array entries and terminators are elided in this chunk. */
650 static struct attribute *xsegbd_attrs[] = {
652 	&dev_attr_major.attr,
653 	&dev_attr_srcport.attr,
654 	&dev_attr_dstport.attr,
657 	&dev_attr_target.attr,
658 	&dev_attr_refresh.attr,
662 static struct attribute_group xsegbd_attr_group = {
663 	.attrs = xsegbd_attrs,
666 static const struct attribute_group *xsegbd_attr_groups[] = {
/* Empty release: real teardown happens in xsegbd_dev_release via dev->release. */
671 static void xsegbd_sysfs_dev_release(struct device *dev)
675 static struct device_type xsegbd_device_type = {
677 	.groups = xsegbd_attr_groups,
678 	.release = xsegbd_sysfs_dev_release,
/* Root device "/sys/devices/xsegbd" that parents every xsegbd device. */
681 static void xsegbd_root_dev_release(struct device *dev)
685 static struct device xsegbd_root_dev = {
686 	.init_name = "xsegbd",
687 	.release = xsegbd_root_dev_release,
/* Register the device on the xsegbd bus under the root device; named after
 * its numeric id. Serialized under xsegbd_mutex. */
690 static int xsegbd_bus_add_dev(struct xsegbd_device *xsegbd_dev)
695 	mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
696 	dev = &xsegbd_dev->dev;
698 	dev->bus = &xsegbd_bus_type;
699 	dev->type = &xsegbd_device_type;
700 	dev->parent = &xsegbd_root_dev;
	/* Final teardown is driven by the refcount via this release hook. */
701 	dev->release = xsegbd_dev_release;
702 	dev_set_name(dev, "%d", xsegbd_dev->id);
704 	ret = device_register(dev);
706 	mutex_unlock(&xsegbd_mutex);
/* Unregister from the bus; drops the ref and eventually runs xsegbd_dev_release. */
710 static void xsegbd_bus_del_dev(struct xsegbd_device *xsegbd_dev)
712 	device_unregister(&xsegbd_dev->dev);
/* Bus 'add' attribute: parse "<target> <src>:<dst>:<nr_requests>" from buf,
 * allocate and register a new xsegbd device, bind its source port, allocate
 * its xseg requests, and bring up the disk via xsegbd_dev_init.
 * NOTE(review): error-path/cleanup ordering lines are partially elided. */
715 static ssize_t xsegbd_add(struct bus_type *bus, const char *buf, size_t count)
717 	struct xsegbd_device *xsegbd_dev;
718 	struct xseg_port *xport;
719 	ssize_t ret = -ENOMEM;
721 	struct list_head *tmp;
	/* Pin the module for the lifetime of the device (put in dev_release). */
723 	if (!try_module_get(THIS_MODULE))
726 	xsegbd_dev = kzalloc(sizeof(*xsegbd_dev), GFP_KERNEL);
730 	spin_lock_init(&xsegbd_dev->lock);
731 	INIT_LIST_HEAD(&xsegbd_dev->node);
	/* nr_requests is optional (sscanf >= 3 accepted). */
734 	if (sscanf(buf, "%" __stringify(XSEGBD_TARGET_NAMELEN) "s "
735 			"%d:%d:%d", xsegbd_dev->target, &xsegbd_dev->src_portno,
736 			&xsegbd_dev->dst_portno, &xsegbd_dev->nr_requests) < 3) {
740 	xsegbd_dev->targetlen = strlen(xsegbd_dev->target);
	/* Scan existing devices: reject a duplicate source port and pick the
	 * next free id, then insert into the global list. */
742 	spin_lock(&xsegbd_dev_list_lock);
744 	list_for_each(tmp, &xsegbd_dev_list) {
745 		struct xsegbd_device *entry;
747 		entry = list_entry(tmp, struct xsegbd_device, node);
749 		if (entry->src_portno == xsegbd_dev->src_portno) {
754 		if (entry->id >= new_id)
755 			new_id = entry->id + 1;
758 	xsegbd_dev->id = new_id;
760 	list_add_tail(&xsegbd_dev->node, &xsegbd_dev_list);
762 	spin_unlock(&xsegbd_dev_list_lock);
764 	XSEGLOG("registering block device major %d", major);
765 	ret = register_blkdev(major, XSEGBD_NAME);
767 		XSEGLOG("cannot register block device!");
771 	xsegbd_dev->major = ret;
772 	XSEGLOG("registered block device major %d", xsegbd_dev->major);
774 	ret = xsegbd_bus_add_dev(xsegbd_dev);
778 	XSEGLOG("binding to source port %u (destination %u)",
779 			xsegbd_dev->src_portno, xsegbd_dev->dst_portno);
780 	xport = xseg_bind_port(xsegbd.xseg, xsegbd_dev->src_portno);
782 		XSEGLOG("cannot bind to port");
787 	/* make sure we don't get any requests until we're ready to handle them */
788 	xport->waitcue = (long) NULL;
790 	XSEGLOG("allocating %u requests", xsegbd_dev->nr_requests);
791 	if (xseg_alloc_requests(xsegbd.xseg, xsegbd_dev->src_portno, xsegbd_dev->nr_requests)) {
792 		XSEGLOG("cannot allocate requests");
798 	ret = xsegbd_dev_init(xsegbd_dev);
	/* Error unwinding (reverse order of setup). */
805 	xsegbd_bus_del_dev(xsegbd_dev);
810 	unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
813 	spin_lock(&xsegbd_dev_list_lock);
814 	list_del_init(&xsegbd_dev->node);
817 	spin_unlock(&xsegbd_dev_list_lock);
/* Look up a device by id in the global list; returns it with the list lock
 * released (no reference taken — caller must serialize, see xsegbd_remove). */
826 static struct xsegbd_device *__xsegbd_get_dev(unsigned long id)
828 	struct list_head *tmp;
829 	struct xsegbd_device *xsegbd_dev;
832 	spin_lock(&xsegbd_dev_list_lock);
833 	list_for_each(tmp, &xsegbd_dev_list) {
834 		xsegbd_dev = list_entry(tmp, struct xsegbd_device, node);
835 		if (xsegbd_dev->id == id) {
836 			spin_unlock(&xsegbd_dev_list_lock);
840 	spin_unlock(&xsegbd_dev_list_lock);
/* Bus 'remove' attribute: parse a numeric id from buf, find the matching
 * device, and unregister it. Serialized under xsegbd_mutex. */
844 static ssize_t xsegbd_remove(struct bus_type *bus, const char *buf, size_t count)
846 	struct xsegbd_device *xsegbd_dev = NULL;
850 	ret = strict_strtoul(buf, 10, &ul_id);
858 	mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
861 	xsegbd_dev = __xsegbd_get_dev(id);
867 	xsegbd_bus_del_dev(xsegbd_dev);
870 	mutex_unlock(&xsegbd_mutex);
/* Bus-level attributes: write-only 'add' and 'remove' control files. */
874 static struct bus_attribute xsegbd_bus_attrs[] = {
875 	__ATTR(add, S_IWUSR, NULL, xsegbd_add),
876 	__ATTR(remove, S_IWUSR, NULL, xsegbd_remove),
/* Register the root device and then the bus; unwind the root device if the
 * bus registration fails. */
880 static int xsegbd_sysfs_init(void)
884 	ret = device_register(&xsegbd_root_dev);
888 	xsegbd_bus_type.bus_attrs = xsegbd_bus_attrs;
889 	ret = bus_register(&xsegbd_bus_type);
891 		device_unregister(&xsegbd_root_dev);
/* Reverse of xsegbd_sysfs_init: bus first, then root device. */
896 static void xsegbd_sysfs_cleanup(void)
898 	bus_unregister(&xsegbd_bus_type);
899 	device_unregister(&xsegbd_root_dev);
902 /* *************************** */
903 /* ** Module Initialization ** */
904 /* *************************** */
/* Module entry: allocate the global pending-slot queue and array, join the
 * xseg segment, and set up sysfs. The trailing kfree/xq_free lines are the
 * error-unwind path (labels elided in this chunk). */
906 static int __init xsegbd_init(void)
910 	if (!xq_alloc_seq(&blk_queue_pending, max_nr_pending, max_nr_pending))
913 	blk_req_pending = kzalloc(sizeof(struct pending) * max_nr_pending, GFP_KERNEL);
914 	if (!blk_req_pending)
918 	ret = xsegbd_xseg_init();
922 	ret = xsegbd_sysfs_init();
926 	XSEGLOG("initialization complete");
934 	kfree(blk_req_pending);
936 	xq_free(&blk_queue_pending);
/* Module exit: tear down sysfs (device removal cascades via the bus).
 * NOTE(review): the xseg/pending cleanup lines appear to be elided here. */
940 static void __exit xsegbd_exit(void)
942 	xsegbd_sysfs_cleanup();
/* Register module entry/exit points. */
946 module_init(xsegbd_init);
947 module_exit(xsegbd_exit);