5 #include <linux/module.h>
6 #include <linux/moduleparam.h>
7 #include <linux/init.h>
8 #include <linux/sched.h>
9 #include <linux/kernel.h>
10 #include <linux/slab.h>
12 #include <linux/errno.h>
13 #include <linux/timer.h>
14 #include <linux/types.h>
15 #include <linux/vmalloc.h>
16 #include <linux/genhd.h>
17 #include <linux/blkdev.h>
18 #include <linux/bio.h>
19 #include <linux/device.h>
20 #include <linux/completion.h>
21 #include <linux/wait.h>
22 #include <sys/kernel/segdev.h>
24 #include <xseg/protocol.h>
26 #define XSEGBD_MINORS 1
27 /* define max request size to be used in xsegbd */
28 //FIXME should we make this 4MB instead of 256KB ?
29 #define XSEGBD_MAX_REQUEST_SIZE 262144U
31 MODULE_DESCRIPTION("xsegbd");
32 MODULE_AUTHOR("XSEG");
33 MODULE_LICENSE("GPL");
/* Module parameters (mode 0644: readable/writable via sysfs after load). */
35 static long sector_size = 0;
36 static long blksize = 512;
38 static int max_dev = 1024;
39 static char name[XSEGBD_SEGMENT_NAMELEN] = "xsegbd";
40 static char spec[256] = "segdev:xsegbd:4:1024:12";
42 module_param(sector_size, long, 0644);
43 module_param(blksize, long, 0644);
44 module_param(max_dev, int, 0644);
/* NOTE(review): the 'major' variable itself is defined on a line not
 * visible in this chunk -- presumably 'static int major' nearby. */
45 module_param(major, int, 0644);
46 module_param_string(name, name, sizeof(name), 0644);
47 module_param_string(spec, spec, sizeof(spec), 0644);
/* Global driver state: one struct xsegbd per module, plus a table of
 * per-device pointers indexed by xseg source port number. */
49 static struct xsegbd xsegbd;
50 static struct xsegbd_device **xsegbd_devices; /* indexed by portno */
51 static DEFINE_MUTEX(xsegbd_mutex);
52 static DEFINE_SPINLOCK(xsegbd_devices_lock);
55 void __xsegbd_get(struct xsegbd_device *xsegbd_dev)
57 atomic_inc(&xsegbd_dev->usercount);
60 void __xsegbd_put(struct xsegbd_device *xsegbd_dev)
62 if (atomic_dec_and_test(&xsegbd_dev->usercount))
63 wake_up(&xsegbd_dev->wq);
/*
 * Look up the device registered at slot @id in xsegbd_devices[] and take
 * a usage reference on it, all under xsegbd_devices_lock.
 * NOTE(review): lines are missing from this chunk -- the original
 * presumably NULL-checks xsegbd_dev before __xsegbd_get() and returns
 * it at the end; confirm against the full source.
 */
66 struct xsegbd_device *__xsegbd_get_dev(unsigned long id)
68 struct xsegbd_device *xsegbd_dev = NULL;
70 spin_lock(&xsegbd_devices_lock);
71 xsegbd_dev = xsegbd_devices[id];
73 __xsegbd_get(xsegbd_dev);
74 spin_unlock(&xsegbd_devices_lock);
79 /* ************************* */
80 /* ***** sysfs helpers ***** */
81 /* ************************* */
83 static struct xsegbd_device *dev_to_xsegbd(struct device *dev)
85 return container_of(dev, struct xsegbd_device, dev);
88 static struct device *xsegbd_get_dev(struct xsegbd_device *xsegbd_dev)
91 return get_device(&xsegbd_dev->dev);
94 static void xsegbd_put_dev(struct xsegbd_device *xsegbd_dev)
96 put_device(&xsegbd_dev->dev);
99 /* ************************* */
100 /* ** XSEG Initialization ** */
101 /* ************************* */
103 static void xseg_callback(uint32_t portno);
/*
 * Attach the module to the shared xseg segment described by the 'spec'
 * module parameter: initialize the xseg library, parse the spec, warn if
 * the segment type is not 'segdev', then join the segment.
 * NOTE(review): declarations, error-path branches and return statements
 * are missing from this chunk; verify them against the full file.
 */
105 int xsegbd_xseg_init(void)
110 strncpy(xsegbd.name, name, XSEGBD_SEGMENT_NAMELEN);
112 r = xseg_initialize();
114 XSEGLOG("cannot initialize 'segdev' peer");
118 r = xseg_parse_spec(spec, &xsegbd.config);
/* Only 'segdev' segments are expected from kernel space; warn otherwise. */
122 if (strncmp(xsegbd.config.type, "segdev", 16))
123 XSEGLOG("WARNING: unexpected segment type '%s' vs 'segdev'",
126 /* leave it here for now */
127 XSEGLOG("joining segment");
128 xsegbd.xseg = xseg_join( xsegbd.config.type,
133 XSEGLOG("cannot find segment");
/*
 * Detach from the xseg segment on module teardown: release the reserved
 * segdev (device 0) and unmap the whole shared segment via the segment
 * type's unmap operation.
 */
144 int xsegbd_xseg_quit(void)
146 struct segdev *segdev;
148 /* make sure to unmap the segment first */
149 segdev = segdev_get(0);
150 clear_bit(SEGDEV_RESERVED, &segdev->flags);
151 xsegbd.xseg->priv->segment_type.ops.unmap(xsegbd.xseg, xsegbd.xseg->segment_size);
158 /* ***************************** */
159 /* ** Block Device Operations ** */
160 /* ***************************** */
/*
 * Block-device open: pin the backing xsegbd_device for the lifetime of
 * the open file.  Balanced by xsegbd_release().
 * NOTE(review): the 'return 0;' and braces are on lines missing from
 * this chunk.
 */
162 static int xsegbd_open(struct block_device *bdev, fmode_t mode)
164 struct gendisk *disk = bdev->bd_disk;
165 struct xsegbd_device *xsegbd_dev = disk->private_data;
167 xsegbd_get_dev(xsegbd_dev);
/*
 * Block-device release: drop the reference taken in xsegbd_open().
 * NOTE(review): trailing 'return 0;' not visible in this chunk.
 */
172 static int xsegbd_release(struct gendisk *gd, fmode_t mode)
174 struct xsegbd_device *xsegbd_dev = gd->private_data;
176 xsegbd_put_dev(xsegbd_dev);
/*
 * ioctl handler stub.  NOTE(review): the body is on lines missing from
 * this chunk -- presumably returns -ENOTTY or similar; confirm.
 */
181 static int xsegbd_ioctl(struct block_device *bdev, fmode_t mode,
182 unsigned int cmd, unsigned long arg)
/* Block device operations table.
 * NOTE(review): the '.open = xsegbd_open' initializer line is missing
 * from this chunk (dropped between .owner and .release). */
187 static const struct block_device_operations xsegbd_ops = {
188 .owner = THIS_MODULE,
190 .release = xsegbd_release,
191 .ioctl = xsegbd_ioctl
195 /* *************************** */
196 /* ** Device Initialization ** */
197 /* *************************** */
/* Forward declarations: the request function and size/close helpers are
 * defined later but needed by the device-init path below. */
199 static void xseg_request_fn(struct request_queue *rq);
200 static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev);
201 static int xsegbd_mapclose(struct xsegbd_device *xsegbd_dev);
/*
 * Bring up the block-device side of an xsegbd device: allocate and
 * configure the request queue, allocate the gendisk, size the disk
 * (either from the sector_size parameter or by querying the mapper via
 * xsegbd_get_size()), and activate it with add_disk().
 * NOTE(review): error-check branches, labels and return statements are
 * on lines missing from this chunk; the tail (put_disk/cleanup_queue)
 * is the failure path.
 */
203 static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
206 struct gendisk *disk;
207 unsigned int max_request_size_bytes;
209 spin_lock_init(&xsegbd_dev->rqlock);
211 xsegbd_dev->xsegbd = &xsegbd;
213 xsegbd_dev->blk_queue = blk_alloc_queue(GFP_KERNEL);
214 if (!xsegbd_dev->blk_queue)
217 if (!blk_init_allocated_queue(xsegbd_dev->blk_queue,
218 xseg_request_fn, &xsegbd_dev->rqlock))
/* Queue callbacks find the device through queuedata. */
221 xsegbd_dev->blk_queue->queuedata = xsegbd_dev;
223 blk_queue_flush(xsegbd_dev->blk_queue, REQ_FLUSH | REQ_FUA);
224 blk_queue_logical_block_size(xsegbd_dev->blk_queue, 512);
225 blk_queue_physical_block_size(xsegbd_dev->blk_queue, blksize);
226 blk_queue_bounce_limit(xsegbd_dev->blk_queue, BLK_BOUNCE_ANY);
228 //blk_queue_max_segments(dev->blk_queue, 512);
/* Cap a single request at XSEGBD_MAX_REQUEST_SIZE (256 KiB) and advertise
 * it as both the minimum and optimal I/O size. */
230 max_request_size_bytes = XSEGBD_MAX_REQUEST_SIZE;
231 blk_queue_max_hw_sectors(xsegbd_dev->blk_queue, max_request_size_bytes >> 9);
232 blk_queue_max_segment_size(xsegbd_dev->blk_queue, max_request_size_bytes);
233 blk_queue_io_min(xsegbd_dev->blk_queue, max_request_size_bytes);
234 blk_queue_io_opt(xsegbd_dev->blk_queue, max_request_size_bytes);
236 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xsegbd_dev->blk_queue);
238 /* vkoukis says we don't need partitions */
239 xsegbd_dev->gd = disk = alloc_disk(1);
243 disk->major = xsegbd_dev->major;
244 disk->first_minor = 0; // id * XSEGBD_MINORS;
245 disk->fops = &xsegbd_ops;
246 disk->queue = xsegbd_dev->blk_queue;
247 disk->private_data = xsegbd_dev;
248 disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
249 snprintf(disk->disk_name, 32, "xsegbd%u", xsegbd_dev->id);
253 /* allow a non-zero sector_size parameter to override the disk size */
255 xsegbd_dev->sectors = sector_size;
257 ret = xsegbd_get_size(xsegbd_dev);
262 set_capacity(disk, xsegbd_dev->sectors);
263 XSEGLOG("xsegbd active...");
264 add_disk(disk); /* immediately activates the device */
/* Failure path: undo disk/queue allocation. */
270 put_disk(xsegbd_dev->gd);
272 blk_cleanup_queue(xsegbd_dev->blk_queue);
274 xsegbd_dev->blk_queue = NULL;
275 xsegbd_dev->gd = NULL;
/*
 * Device-model release callback: final teardown of an xsegbd device.
 * Removes the gendisk, asks the mapper to close the mapping, unhooks the
 * device from xsegbd_devices[], waits for all in-flight users, then
 * frees the queue, pending-request bookkeeping and the blkdev major.
 * NOTE(review): intermediate lines (braces, blank lines, kfree of the
 * device struct) are missing from this chunk.
 */
279 static void xsegbd_dev_release(struct device *dev)
282 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
285 /* cleanup gendisk and blk_queue the right way */
286 if (xsegbd_dev->gd) {
287 if (xsegbd_dev->gd->flags & GENHD_FL_UP)
288 del_gendisk(xsegbd_dev->gd);
290 put_disk(xsegbd_dev->gd);
291 xsegbd_mapclose(xsegbd_dev);
/* Unpublish the device so xseg_callback() can no longer look it up. */
294 spin_lock(&xsegbd_devices_lock);
295 BUG_ON(xsegbd_devices[xsegbd_dev->src_portno] != xsegbd_dev);
296 xsegbd_devices[xsegbd_dev->src_portno] = NULL;
297 spin_unlock(&xsegbd_devices_lock);
299 // xseg_cancel_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
300 xseg_quit_local_signal(xsegbd_dev->xseg, xsegbd_dev->src_portno);
301 /* wait for all pending operations on device to end */
302 wait_event(xsegbd_dev->wq, atomic_read(&xsegbd_dev->usercount) <= 0);
303 XSEGLOG("releasing id: %d", xsegbd_dev->id);
304 if (xsegbd_dev->blk_queue)
305 blk_cleanup_queue(xsegbd_dev->blk_queue);
308 // if (xseg_free_requests(xsegbd_dev->xseg,
309 // xsegbd_dev->src_portno, xsegbd_dev->nr_requests) < 0)
310 // XSEGLOG("Error trying to free requests!\n");
313 //FIXME xseg_leave to free_up resources ?
314 unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
316 if (xsegbd_dev->blk_req_pending)
317 kfree(xsegbd_dev->blk_req_pending);
318 xq_free(&xsegbd_dev->blk_queue_pending);
/* Balances try_module_get() in xsegbd_add(). */
322 module_put(THIS_MODULE);
325 /* ******************* */
326 /* ** Critical Path ** */
327 /* ******************* */
/*
 * Copy the data of a write request from the block layer's bio segments
 * into the xseg request buffer, mapping each page with kmap_atomic.
 * NOTE(review): the declaration and per-segment advance of 'off' are on
 * lines missing from this chunk (presumably 'uint64_t off = 0' and
 * 'off += bvec->bv_len' inside the loop) -- confirm.
 */
329 static void blk_to_xseg(struct xseg *xseg, struct xseg_request *xreq,
330 struct request *blkreq)
332 struct bio_vec *bvec;
333 struct req_iterator iter;
335 char *data = xseg_get_data(xseg, xreq);
336 rq_for_each_segment(bvec, blkreq, iter) {
337 char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
338 memcpy(data + off, bdata, bvec->bv_len);
340 kunmap_atomic(bdata);
/*
 * Inverse of blk_to_xseg(): copy served read data from the xseg request
 * buffer back into the request's bio segments.
 * NOTE(review): 'off' declaration/advance lines are missing from this
 * chunk, as in blk_to_xseg().
 */
344 static void xseg_to_blk(struct xseg *xseg, struct xseg_request *xreq,
345 struct request *blkreq)
347 struct bio_vec *bvec;
348 struct req_iterator iter;
350 char *data = xseg_get_data(xseg, xreq);
351 rq_for_each_segment(bvec, blkreq, iter) {
352 char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
353 memcpy(bdata, data + off, bvec->bv_len);
355 kunmap_atomic(bdata);
/*
 * Request-queue strategy function (critical path).  Called by the block
 * layer with rqlock held; drains the queue by translating each struct
 * request into an xseg request: allocate an xseg request and a pending
 * slot, fill in target/offset/size/flags, copy write data in, submit to
 * the destination port and signal it.  Completion arrives later in
 * xseg_callback().
 * NOTE(review): loop structure, local declarations (xqindex blkreq_idx,
 * flags, r, p, target, datalen) and several error/exit labels are on
 * lines missing from this chunk; comments below annotate the visible
 * skeleton only.
 */
359 static void xseg_request_fn(struct request_queue *rq)
361 struct xseg_request *xreq;
362 struct xsegbd_device *xsegbd_dev = rq->queuedata;
363 struct request *blkreq;
364 struct xsegbd_pending *pending;
/* Hold a usage ref while we work outside rqlock. */
372 __xsegbd_get(xsegbd_dev);
374 spin_unlock_irq(&xsegbd_dev->rqlock);
/* Debug aid: warn when running in atomic context. */
376 if (current_thread_info()->preempt_count || irqs_disabled()){
377 XSEGLOG("Current thread preempt_count: %d, irqs_disabled(): %lu ",
378 current_thread_info()->preempt_count, irqs_disabled());
380 //XSEGLOG("Priority: %d", current_thread_info()->task->prio);
381 //XSEGLOG("Static priority: %d", current_thread_info()->task->static_prio);
382 //XSEGLOG("Normal priority: %d", current_thread_info()->task->normal_prio);
383 //XSEGLOG("Rt_priority: %u", current_thread_info()->task->rt_priority);
384 blkreq_idx = Noneidx;
385 xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno,
386 xsegbd_dev->dst_portno, X_ALLOC);
/* Reserve a pending-table slot to track this in-flight request. */
390 blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending,
391 xsegbd_dev->src_portno);
392 if (blkreq_idx == Noneidx)
395 if (blkreq_idx >= xsegbd_dev->nr_requests) {
396 XSEGLOG("blkreq_idx >= xsegbd_dev->nr_requests");
/* Re-take rqlock only to fetch the next block request. */
402 spin_lock_irqsave(&xsegbd_dev->rqlock, flags);
403 blkreq = blk_fetch_request(rq);
405 spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
/* Non-filesystem requests are completed immediately and skipped. */
409 if (blkreq->cmd_type != REQ_TYPE_FS) {
411 XSEGLOG("non-fs cmd_type: %u. *shrug*", blkreq->cmd_type);
412 __blk_end_request_all(blkreq, 0);
413 spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
416 spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
417 if (current_thread_info()->preempt_count || irqs_disabled()){
418 XSEGLOG("Current thread preempt_count: %d, irqs_disabled(): %lu ",
419 current_thread_info()->preempt_count, irqs_disabled());
422 datalen = blk_rq_bytes(blkreq);
423 r = xseg_prep_request(xsegbd_dev->xseg, xreq,
424 xsegbd_dev->targetlen, datalen);
426 XSEGLOG("couldn't prep request");
427 blk_end_request_err(blkreq, r);
/* Sanity check: prepared buffer must fit target name + payload. */
432 if (xreq->bufferlen - xsegbd_dev->targetlen < datalen){
433 XSEGLOG("malformed req buffers");
434 blk_end_request_err(blkreq, r);
439 target = xseg_get_target(xsegbd_dev->xseg, xreq);
440 strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
442 pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
443 pending->dev = xsegbd_dev;
444 pending->request = blkreq;
445 pending->comp = NULL;
447 xreq->size = datalen;
448 xreq->offset = blk_rq_pos(blkreq) << 9;
/* Stash the pending index so xseg_callback() can find this request. */
449 xreq->priv = (uint64_t) blkreq_idx;
452 if (xreq->offset >= (sector_size << 9))
453 XSEGLOG("sector offset: %lu > %lu, flush:%u, fua:%u",
454 blk_rq_pos(blkreq), sector_size,
455 blkreq->cmd_flags & REQ_FLUSH,
456 blkreq->cmd_flags & REQ_FUA);
459 if (blkreq->cmd_flags & REQ_FLUSH)
460 xreq->flags |= XF_FLUSH;
462 if (blkreq->cmd_flags & REQ_FUA)
463 xreq->flags |= XF_FUA;
/* rq_data_dir() != 0 means write: copy payload into the xseg buffer. */
465 if (rq_data_dir(blkreq)) {
466 /* unlock for data transfers? */
467 blk_to_xseg(xsegbd_dev->xseg, xreq, blkreq);
475 /* xsegbd_get here. will be put on receive */
476 __xsegbd_get(xsegbd_dev);
477 p = xseg_submit(xsegbd_dev->xseg, xreq,
478 xsegbd_dev->src_portno, X_ALLOC);
480 XSEGLOG("coundn't submit req");
482 blk_end_request_err(blkreq, r);
483 __xsegbd_put(xsegbd_dev);
486 WARN_ON(xseg_signal(xsegbd_dev->xsegbd->xseg, p) < 0);
/* Error unwinding: return the xseg request and the pending slot. */
489 BUG_ON(xseg_put_request(xsegbd_dev->xsegbd->xseg, xreq,
490 xsegbd_dev->src_portno) == -1);
491 if (blkreq_idx != Noneidx)
492 BUG_ON(xq_append_head(&xsegbd_dev->blk_queue_pending,
493 blkreq_idx, xsegbd_dev->src_portno) == Noneidx);
/* Re-acquire rqlock before returning to the block layer. */
494 spin_lock_irq(&xsegbd_dev->rqlock);
495 __xsegbd_put(xsegbd_dev);
/*
 * Parse a completed size query: validate the xseg request and its state,
 * then read the 64-bit size (bytes) from the reply data and store it as
 * a 512-byte sector count on the device.
 * NOTE(review): the guard conditions and error returns surrounding the
 * XSEGLOG calls are on lines missing from this chunk.
 */
498 int update_dev_sectors_from_request( struct xsegbd_device *xsegbd_dev,
499 struct xseg_request *xreq )
503 XSEGLOG("Invalid xreq");
507 if (xreq->state & XS_FAILED)
510 if (!(xreq->state & XS_SERVED))
513 data = xseg_get_data(xsegbd_dev->xseg, xreq);
515 XSEGLOG("Invalid req data");
519 XSEGLOG("Invalid xsegbd_dev");
/* Reply payload is the device size in bytes; convert to sectors. */
522 xsegbd_dev->sectors = *((uint64_t *) data) / 512ULL;
/*
 * Synchronously query the mapper for the device size: build an xseg
 * info request, submit it, block on a completion that xseg_callback()
 * fires, then update xsegbd_dev->sectors from the reply.
 * NOTE(review): local declarations, the X_INFO op assignment and error
 * labels are on lines missing from this chunk.
 */
526 static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev)
528 struct xseg_request *xreq;
532 struct xsegbd_pending *pending;
533 struct completion comp;
538 __xsegbd_get(xsegbd_dev);
540 xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno,
541 xsegbd_dev->dst_portno, X_ALLOC);
545 BUG_ON(xseg_prep_request(xsegbd_dev->xseg, xreq, xsegbd_dev->targetlen,
546 sizeof(struct xseg_reply_info)));
/* Reserve a pending slot carrying the completion for the callback. */
548 init_completion(&comp);
549 blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending, 1);
550 if (blkreq_idx == Noneidx)
553 pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
554 pending->dev = xsegbd_dev;
555 pending->request = NULL;
556 pending->comp = &comp;
559 xreq->priv = (uint64_t) blkreq_idx;
561 target = xseg_get_target(xsegbd_dev->xseg, xreq);
562 strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
563 xreq->size = xreq->datalen;
567 xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
568 p = xseg_submit(xsegbd_dev->xseg, xreq,
569 xsegbd_dev->src_portno, X_ALLOC);
571 XSEGLOG("couldn't submit request");
575 WARN_ON(xseg_signal(xsegbd_dev->xseg, p) < 0);
576 XSEGLOG("Before wait for completion, comp %lx [%llu]", (unsigned long) pending->comp, (unsigned long long) blkreq_idx);
/* Sleep until xseg_callback() completes this request. */
577 wait_for_completion_interruptible(&comp);
578 XSEGLOG("Woken up after wait_for_completion_interruptible(), comp: %lx [%llu]", (unsigned long) pending->comp, (unsigned long long) blkreq_idx);
579 ret = update_dev_sectors_from_request(xsegbd_dev, xreq);
580 //XSEGLOG("get_size: sectors = %ld\n", (long)xsegbd_dev->sectors);
582 BUG_ON(xseg_put_request(xsegbd_dev->xseg, xreq, xsegbd_dev->src_portno) == -1);
584 __xsegbd_put(xsegbd_dev);
/* Return the pending slot to the free queue. */
589 pending->comp = NULL;
590 xq_append_head(&xsegbd_dev->blk_queue_pending, blkreq_idx, 1);
/*
 * Synchronously ask the mapper to close the target mapping (used on
 * device release).  Same submit-and-wait pattern as xsegbd_get_size(),
 * but with a zero-length payload; failure is only logged.
 * NOTE(review): local declarations, the X_CLOSE op assignment and error
 * labels are on lines missing from this chunk.
 */
595 static int xsegbd_mapclose(struct xsegbd_device *xsegbd_dev)
597 struct xseg_request *xreq;
601 struct xsegbd_pending *pending;
602 struct completion comp;
607 __xsegbd_get(xsegbd_dev);
608 xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno,
609 xsegbd_dev->dst_portno, X_ALLOC);
613 BUG_ON(xseg_prep_request(xsegbd_dev->xseg, xreq, xsegbd_dev->targetlen, 0));
615 init_completion(&comp);
616 blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending, 1);
617 if (blkreq_idx == Noneidx)
620 pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
621 pending->dev = xsegbd_dev;
622 pending->request = NULL;
623 pending->comp = &comp;
626 xreq->priv = (uint64_t) blkreq_idx;
628 target = xseg_get_target(xsegbd_dev->xseg, xreq);
629 strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
630 xreq->size = xreq->datalen;
634 xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
635 p = xseg_submit(xsegbd_dev->xseg, xreq,
636 xsegbd_dev->src_portno, X_ALLOC);
638 XSEGLOG("couldn't submit request");
642 WARN_ON(xseg_signal(xsegbd_dev->xseg, p) < 0);
643 wait_for_completion_interruptible(&comp);
/* Best effort: a failed close is logged, not propagated as an error. */
645 if (xreq->state & XS_FAILED)
646 XSEGLOG("Couldn't close disk on mapper");
648 BUG_ON(xseg_put_request(xsegbd_dev->xseg, xreq, xsegbd_dev->src_portno) == -1);
650 __xsegbd_put(xsegbd_dev);
655 pending->comp = NULL;
656 xq_append_head(&xsegbd_dev->blk_queue_pending, blkreq_idx, 1);
/*
 * Completion callback for our source port: drain received xseg requests.
 * For each one, look up the pending slot recorded in xreq->priv; if a
 * completion is attached (size/close waiters) just complete() it,
 * otherwise finish the associated block request (copying read data back
 * first), recycle the pending slot and the xseg request, and finally
 * re-kick xseg_request_fn().
 * NOTE(review): the receive loop structure, 'err' computation and
 * several guard/exit lines are missing from this chunk.
 */
661 static void xseg_callback(xport portno)
663 struct xsegbd_device *xsegbd_dev;
664 struct xseg_request *xreq;
665 struct request *blkreq;
666 struct xsegbd_pending *pending;
668 xqindex blkreq_idx, ridx;
672 xsegbd_dev = __xsegbd_get_dev(portno);
674 XSEGLOG("portno: %u has no xsegbd device assigned", portno);
680 xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
681 xreq = xseg_receive(xsegbd_dev->xseg, portno, 0);
685 // xseg_cancel_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
687 blkreq_idx = (xqindex) xreq->priv;
688 if (blkreq_idx >= xsegbd_dev->nr_requests) {
690 //FIXME maybe put request?
694 pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
696 /* someone is blocking on this request
697 and will handle it when we wake them up. */
698 complete(pending->comp);
699 /* the request is blocker's responsibility so
700 we will not put_request(); */
705 /* this is now treated as a block I/O request to end */
706 blkreq = pending->request;
707 pending->request = NULL;
708 if (xsegbd_dev != pending->dev) {
709 //FIXME maybe put request?
710 XSEGLOG("xsegbd_dev != pending->dev");
716 //FIXME maybe put request?
717 XSEGLOG("blkreq does not exist");
723 if (!(xreq->state & XS_SERVED))
726 if (xreq->serviced != blk_rq_bytes(blkreq))
/* rq_data_dir() == 0 means read: copy served data back to the bios. */
730 if (!rq_data_dir(blkreq)){
731 xseg_to_blk(xsegbd_dev->xseg, xreq, blkreq);
734 blk_end_request_all(blkreq, err);
736 ridx = xq_append_head(&xsegbd_dev->blk_queue_pending,
737 blkreq_idx, xsegbd_dev->src_portno);
738 if (ridx == Noneidx) {
739 XSEGLOG("couldnt append blkreq_idx");
743 if (xseg_put_request(xsegbd_dev->xseg, xreq,
744 xsegbd_dev->src_portno) < 0){
745 XSEGLOG("couldn't put req");
/* Drop the per-request ref taken before submit in xseg_request_fn(). */
748 __xsegbd_put(xsegbd_dev);
/* Retry queued block requests now that resources were freed. */
751 spin_lock_irqsave(&xsegbd_dev->rqlock, flags);
752 xseg_request_fn(xsegbd_dev->blk_queue);
753 spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
754 __xsegbd_put(xsegbd_dev);
759 /* sysfs interface */
/* Bus type for xsegbd devices.
 * NOTE(review): initializer fields (presumably .name = "xsegbd") are on
 * lines missing from this chunk. */
761 static struct bus_type xsegbd_bus_type = {
765 static ssize_t xsegbd_size_show(struct device *dev,
766 struct device_attribute *attr, char *buf)
768 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
770 return sprintf(buf, "%llu\n", (unsigned long long) xsegbd_dev->sectors * 512ULL);
773 static ssize_t xsegbd_major_show(struct device *dev,
774 struct device_attribute *attr, char *buf)
776 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
778 return sprintf(buf, "%d\n", xsegbd_dev->major);
781 static ssize_t xsegbd_srcport_show(struct device *dev,
782 struct device_attribute *attr, char *buf)
784 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
786 return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->src_portno);
789 static ssize_t xsegbd_dstport_show(struct device *dev,
790 struct device_attribute *attr, char *buf)
792 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
794 return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->dst_portno);
797 static ssize_t xsegbd_id_show(struct device *dev,
798 struct device_attribute *attr, char *buf)
800 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
802 return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->id);
805 static ssize_t xsegbd_reqs_show(struct device *dev,
806 struct device_attribute *attr, char *buf)
808 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
810 return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->nr_requests);
813 static ssize_t xsegbd_target_show(struct device *dev,
814 struct device_attribute *attr, char *buf)
816 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
818 return sprintf(buf, "%s\n", xsegbd_dev->target);
/*
 * sysfs 'refresh' (write-only): re-query the mapper for the device size
 * and update the gendisk capacity accordingly, under xsegbd_mutex.
 * NOTE(review): the buf/count parameters, 'rc' declaration, error checks
 * and return are on lines missing from this chunk.
 */
821 static ssize_t xsegbd_image_refresh(struct device *dev,
822 struct device_attribute *attr,
826 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
829 mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
831 rc = xsegbd_get_size(xsegbd_dev);
837 set_capacity(xsegbd_dev->gd, xsegbd_dev->sectors);
840 mutex_unlock(&xsegbd_mutex);
/*
 * sysfs 'cleanup' (write-only): forcibly fail every in-flight request.
 * Walks the pending table under the blk_queue_pending xlock; each busy
 * slot has its block request ended with -EIO and any waiter's completion
 * fired, then the slot is returned to the free queue.
 * NOTE(review): 'i' declaration, the complete() call on 'comp', and the
 * return are on lines missing from this chunk.
 */
845 static ssize_t xsegbd_cleanup(struct device *dev,
846 struct device_attribute *attr,
850 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
852 struct request *blkreq = NULL;
853 struct xsegbd_pending *pending = NULL;
854 struct completion *comp = NULL;
856 mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
857 xlock_acquire(&xsegbd_dev->blk_queue_pending.lock,
858 xsegbd_dev->src_portno);
859 for (i = 0; i < xsegbd_dev->nr_requests; i++) {
/* __xq_check returns false when slot i is NOT in the free queue,
 * i.e. it is currently in flight. */
860 if (!__xq_check(&xsegbd_dev->blk_queue_pending, i)) {
861 pending = &xsegbd_dev->blk_req_pending[i];
862 blkreq = pending->request;
863 pending->request = NULL;
864 comp = pending->comp;
865 pending->comp = NULL;
867 XSEGLOG("Cleaning up blkreq %lx [%d]", (unsigned long) blkreq, i);
868 blk_end_request_all(blkreq, -EIO);
871 XSEGLOG("Cleaning up comp %lx [%d]", (unsigned long) comp, i);
874 __xq_append_tail(&xsegbd_dev->blk_queue_pending, i);
877 xlock_release(&xsegbd_dev->blk_queue_pending.lock);
879 mutex_unlock(&xsegbd_mutex);
/* Per-device sysfs attributes: read-only info plus two write-only
 * actions ('refresh' re-reads the size, 'cleanup' fails stuck I/O). */
883 static DEVICE_ATTR(size, S_IRUGO, xsegbd_size_show, NULL);
884 static DEVICE_ATTR(major, S_IRUGO, xsegbd_major_show, NULL);
885 static DEVICE_ATTR(srcport, S_IRUGO, xsegbd_srcport_show, NULL);
886 static DEVICE_ATTR(dstport, S_IRUGO, xsegbd_dstport_show, NULL);
887 static DEVICE_ATTR(id , S_IRUGO, xsegbd_id_show, NULL);
888 static DEVICE_ATTR(reqs , S_IRUGO, xsegbd_reqs_show, NULL);
889 static DEVICE_ATTR(target, S_IRUGO, xsegbd_target_show, NULL);
890 static DEVICE_ATTR(refresh , S_IWUSR, NULL, xsegbd_image_refresh);
891 static DEVICE_ATTR(cleanup , S_IWUSR, NULL, xsegbd_cleanup);
/* Attribute array/group wiring for the device type.
 * NOTE(review): entries for size/id/reqs, the NULL terminator, and the
 * groups-array contents are on lines missing from this chunk. */
893 static struct attribute *xsegbd_attrs[] = {
895 &dev_attr_major.attr,
896 &dev_attr_srcport.attr,
897 &dev_attr_dstport.attr,
900 &dev_attr_target.attr,
901 &dev_attr_refresh.attr,
902 &dev_attr_cleanup.attr,
906 static struct attribute_group xsegbd_attr_group = {
907 .attrs = xsegbd_attrs,
910 static const struct attribute_group *xsegbd_attr_groups[] = {
/* Empty release callbacks (real teardown happens in xsegbd_dev_release),
 * the device type carrying the sysfs attribute groups, and the root
 * device all xsegbd devices hang under. */
915 static void xsegbd_sysfs_dev_release(struct device *dev)
919 static struct device_type xsegbd_device_type = {
921 .groups = xsegbd_attr_groups,
922 .release = xsegbd_sysfs_dev_release,
925 static void xsegbd_root_dev_release(struct device *dev)
929 static struct device xsegbd_root_dev = {
930 .init_name = "xsegbd",
931 .release = xsegbd_root_dev_release,
/*
 * Register the device with the driver model under the xsegbd bus and
 * root device, named after its id.  Serialized by xsegbd_mutex.
 * NOTE(review): 'ret' declaration and the return statement are on lines
 * missing from this chunk.
 */
934 static int xsegbd_bus_add_dev(struct xsegbd_device *xsegbd_dev)
939 mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
940 dev = &xsegbd_dev->dev;
942 dev->bus = &xsegbd_bus_type;
943 dev->type = &xsegbd_device_type;
944 dev->parent = &xsegbd_root_dev;
/* Final cleanup runs from this callback when the last ref is dropped. */
945 dev->release = xsegbd_dev_release;
946 dev_set_name(dev, "%d", xsegbd_dev->id);
948 ret = device_register(dev);
950 mutex_unlock(&xsegbd_mutex);
954 static void xsegbd_bus_del_dev(struct xsegbd_device *xsegbd_dev)
956 device_unregister(&xsegbd_dev->dev);
/*
 * Bus 'add' attribute: create a new xsegbd device from a sysfs write of
 * the form "<target> <srcport>:<dstport>:<nr_requests>".  Allocates the
 * device struct, claims the source-port slot, registers a blkdev major,
 * adds the device to the bus, allocates pending-request bookkeeping,
 * joins the segment, binds the source port and finally initializes the
 * block device via xsegbd_dev_init().  The tail of the function is the
 * reverse-order error unwinding chain.
 * NOTE(review): error-check branches, goto labels and the final return
 * are on lines missing from this chunk.
 */
959 static ssize_t xsegbd_add(struct bus_type *bus, const char *buf, size_t count)
961 struct xsegbd_device *xsegbd_dev;
962 struct xseg_port *port;
963 ssize_t ret = -ENOMEM;
/* Hold the module while a device exists; dropped in dev_release. */
965 if (!try_module_get(THIS_MODULE))
968 xsegbd_dev = kzalloc(sizeof(*xsegbd_dev), GFP_KERNEL);
972 spin_lock_init(&xsegbd_dev->rqlock);
973 INIT_LIST_HEAD(&xsegbd_dev->node);
974 init_waitqueue_head(&xsegbd_dev->wq);
975 atomic_set(&xsegbd_dev->usercount, 0);
/* Parse "<target> src:dst:nr_requests" from the sysfs write. */
978 if (sscanf(buf, "%" __stringify(XSEGBD_TARGET_NAMELEN) "s "
979 "%d:%d:%d", xsegbd_dev->target, &xsegbd_dev->src_portno,
980 &xsegbd_dev->dst_portno, &xsegbd_dev->nr_requests) < 3) {
984 xsegbd_dev->targetlen = strlen(xsegbd_dev->target);
/* Claim the source-port slot; fail if a device already uses it. */
986 spin_lock(&xsegbd_devices_lock);
987 if (xsegbd_devices[xsegbd_dev->src_portno] != NULL) {
991 xsegbd_devices[xsegbd_dev->src_portno] = xsegbd_dev;
992 xsegbd_dev->id = xsegbd_dev->src_portno;
993 spin_unlock(&xsegbd_devices_lock);
995 XSEGLOG("registering block device major %d", major);
996 ret = register_blkdev(major, XSEGBD_NAME);
998 XSEGLOG("cannot register block device!");
1002 xsegbd_dev->major = ret;
1003 XSEGLOG("registered block device major %d", xsegbd_dev->major);
1005 ret = xsegbd_bus_add_dev(xsegbd_dev);
/* One pending slot per potential in-flight request. */
1009 if (!xq_alloc_seq(&xsegbd_dev->blk_queue_pending,
1010 xsegbd_dev->nr_requests,
1011 xsegbd_dev->nr_requests))
1014 xsegbd_dev->blk_req_pending = kzalloc(
1015 xsegbd_dev->nr_requests *sizeof(struct xsegbd_pending),
1017 if (!xsegbd_dev->blk_req_pending)
1021 XSEGLOG("joining segment");
1022 //FIXME use xsebd module config for now
1023 xsegbd_dev->xseg = xseg_join( xsegbd.config.type,
1027 if (!xsegbd_dev->xseg)
1028 goto out_freepending;
1029 __sync_synchronize();
1031 XSEGLOG("%s binding to source port %u (destination %u)", xsegbd_dev->target,
1032 xsegbd_dev->src_portno, xsegbd_dev->dst_portno);
1033 port = xseg_bind_port(xsegbd_dev->xseg, xsegbd_dev->src_portno, NULL);
1035 XSEGLOG("cannot bind to port");
/* Sanity: the port we got must match the port we asked for. */
1041 if (xsegbd_dev->src_portno != xseg_portno(xsegbd_dev->xseg, port)) {
1042 XSEGLOG("portno != xsegbd_dev->src_portno");
1047 xseg_init_local_signal(xsegbd_dev->xseg, xsegbd_dev->src_portno);
1050 /* make sure we don't get any requests until we're ready to handle them */
1051 xseg_cancel_wait(xsegbd_dev->xseg, xseg_portno(xsegbd_dev->xseg, port));
1053 ret = xsegbd_dev_init(xsegbd_dev);
1057 xseg_prepare_wait(xsegbd_dev->xseg, xseg_portno(xsegbd_dev->xseg, port));
/* Error unwinding, reverse order of the setup above. */
1061 xseg_quit_local_signal(xsegbd_dev->xseg, xsegbd_dev->src_portno);
1063 xseg_leave(xsegbd_dev->xseg);
1066 kfree(xsegbd_dev->blk_req_pending);
1069 xq_free(&xsegbd_dev->blk_queue_pending);
1072 xsegbd_bus_del_dev(xsegbd_dev);
1076 unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
1079 spin_lock(&xsegbd_devices_lock);
1080 xsegbd_devices[xsegbd_dev->src_portno] = NULL;
1083 spin_unlock(&xsegbd_devices_lock);
/*
 * Bus 'remove' attribute: tear down the device whose id was written.
 * Looks the device up (taking a usage ref), drops that ref, then
 * unregisters it, all under xsegbd_mutex.
 * NOTE(review): 'ret'/'id' declarations, error checks and the return are
 * on lines missing from this chunk.  strict_strtoul() is deprecated in
 * modern kernels in favor of kstrtoul() -- worth updating when the
 * kernel baseline allows.
 */
1092 static ssize_t xsegbd_remove(struct bus_type *bus, const char *buf, size_t count)
1094 struct xsegbd_device *xsegbd_dev = NULL;
1096 unsigned long ul_id;
1098 ret = strict_strtoul(buf, 10, &ul_id);
1106 mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
1109 xsegbd_dev = __xsegbd_get_dev(id);
/* Drop the lookup ref before unregistering the device. */
1114 __xsegbd_put(xsegbd_dev);
1115 xsegbd_bus_del_dev(xsegbd_dev);
1118 mutex_unlock(&xsegbd_mutex);
/* Bus-level write-only attributes: 'add' creates a device, 'remove'
 * destroys one.  NOTE(review): the __ATTR_NULL terminator line is not
 * visible in this chunk. */
1122 static struct bus_attribute xsegbd_bus_attrs[] = {
1123 __ATTR(add, S_IWUSR, NULL, xsegbd_add),
1124 __ATTR(remove, S_IWUSR, NULL, xsegbd_remove),
/*
 * Register the sysfs root device and the xsegbd bus; on bus registration
 * failure the root device is unregistered again.
 * NOTE(review): 'ret' declaration, early-return checks and the final
 * return are on lines missing from this chunk.
 */
1128 static int xsegbd_sysfs_init(void)
1132 ret = device_register(&xsegbd_root_dev);
1136 xsegbd_bus_type.bus_attrs = xsegbd_bus_attrs;
1137 ret = bus_register(&xsegbd_bus_type);
1139 device_unregister(&xsegbd_root_dev);
1144 static void xsegbd_sysfs_cleanup(void)
1146 bus_unregister(&xsegbd_bus_type);
1147 device_unregister(&xsegbd_root_dev);
1150 /* *************************** */
1151 /* ** Module Initialization ** */
1152 /* *************************** */
/*
 * Module entry point: allocate the per-port device table, then bring up
 * the xseg attachment and the sysfs interface.
 * NOTE(review): 'sizeof(struct xsegbd_devices *)' is a naming typo --
 * the table holds 'struct xsegbd_device *'.  Harmless in practice (all
 * object pointers have the same size) but misleading; should be
 * 'sizeof(*xsegbd_devices)'.  Error-check branches, goto labels and
 * returns are on lines missing from this chunk.
 */
1154 static int __init xsegbd_init(void)
1157 xsegbd_devices = kzalloc(max_dev * sizeof(struct xsegbd_devices *), GFP_KERNEL);
1158 if (!xsegbd_devices)
1161 spin_lock_init(&xsegbd_devices_lock);
1164 ret = xsegbd_xseg_init();
1168 ret = xsegbd_sysfs_init();
1172 XSEGLOG("initialization complete");
/* Failure path: free the device table. */
1181 kfree(xsegbd_devices);
/*
 * Module exit: remove the sysfs interface (which unregisters any
 * remaining devices).  NOTE(review): the xseg detach call and closing
 * brace are on lines missing from this chunk -- presumably
 * xsegbd_xseg_quit() follows; confirm against the full file.
 */
1186 static void __exit xsegbd_exit(void)
1188 xsegbd_sysfs_cleanup();
1192 module_init(xsegbd_init);
1193 module_exit(xsegbd_exit);