5 #include <linux/module.h>
6 #include <linux/moduleparam.h>
7 #include <linux/init.h>
8 #include <linux/sched.h>
9 #include <linux/kernel.h>
10 #include <linux/slab.h>
12 #include <linux/errno.h>
13 #include <linux/timer.h>
14 #include <linux/types.h>
15 #include <linux/vmalloc.h>
16 #include <linux/genhd.h>
17 #include <linux/blkdev.h>
18 #include <linux/bio.h>
19 #include <linux/device.h>
20 #include <linux/completion.h>
21 #include <linux/wait.h>
22 #include <sys/kernel/segdev.h>
24 #include <xseg/protocol.h>
/* One minor per gendisk: partitions are deliberately not supported. */
26 #define XSEGBD_MINORS 1
27 /* define max request size to be used in xsegbd */
28 //FIXME should we make this 4MB instead of 256KB ?
29 //#define XSEGBD_MAX_REQUEST_SIZE 262144U
30 #define XSEGBD_MAX_REQUEST_SIZE 4194304U
32 MODULE_DESCRIPTION("xsegbd");
33 MODULE_AUTHOR("XSEG");
34 MODULE_LICENSE("GPL");
/*
 * Module parameters (all runtime-tunable, 0644):
 *  sector_size - if non-zero, overrides the disk size reported by the mapper
 *  blksize     - physical block size advertised to the block layer
 *  max_dev     - size of the xsegbd_devices[] lookup array (indexed by port)
 *  name        - XSEG segment name to join
 *  spec        - XSEG segment spec string "type:name:ports:objs:objsize"
 */
36 static long sector_size = 0;
37 static long blksize = 512;
39 static int max_dev = 1024;
40 static char name[XSEGBD_SEGMENT_NAMELEN] = "xsegbd";
41 static char spec[256] = "segdev:xsegbd:4:1024:12";
43 module_param(sector_size, long, 0644);
44 module_param(blksize, long, 0644);
45 module_param(max_dev, int, 0644);
/* NOTE(review): the 'major' variable itself is declared on a line elided
 * from this extract; presumably a 'static int major' default. Verify. */
46 module_param(major, int, 0644);
47 module_param_string(name, name, sizeof(name), 0644);
48 module_param_string(spec, spec, sizeof(spec), 0644);
/* Global driver state: the joined segment plus the per-port device table.
 * xsegbd_devices[] is protected by xsegbd_devices_lock; slower admin paths
 * (add/remove/refresh/cleanup) additionally serialize on xsegbd_mutex. */
50 static struct xsegbd xsegbd;
51 static struct xsegbd_device **xsegbd_devices; /* indexed by portno */
52 static DEFINE_MUTEX(xsegbd_mutex);
53 static DEFINE_SPINLOCK(xsegbd_devices_lock);
/*
 * Look up the xsegbd_device registered for @id (the device's source port
 * number) under xsegbd_devices_lock. Returns NULL if the slot is empty.
 * NOTE(review): no bounds check against max_dev is visible here — callers
 * appear to pass raw port numbers; the check may be on an elided line.
 */
56 struct xsegbd_device *__xsegbd_get_dev(unsigned long id)
58 struct xsegbd_device *xsegbd_dev = NULL;
60 spin_lock(&xsegbd_devices_lock);
61 xsegbd_dev = xsegbd_devices[id];
62 spin_unlock(&xsegbd_devices_lock);
67 /* ************************* */
68 /* ***** sysfs helpers ***** */
69 /* ************************* */
/* Map an embedded struct device back to its owning xsegbd_device. */
71 static struct xsegbd_device *dev_to_xsegbd(struct device *dev)
73 return container_of(dev, struct xsegbd_device, dev);
/* Take a reference on the device-model object backing @xsegbd_dev.
 * Paired with xsegbd_put_dev(); the last put triggers xsegbd_dev_release. */
76 static struct device *xsegbd_get_dev(struct xsegbd_device *xsegbd_dev)
79 return get_device(&xsegbd_dev->dev);
/* Drop the reference taken by xsegbd_get_dev(). */
82 static void xsegbd_put_dev(struct xsegbd_device *xsegbd_dev)
84 put_device(&xsegbd_dev->dev);
87 /* ************************* */
88 /* ** XSEG Initialization ** */
89 /* ************************* */
91 static void xseg_callback(uint32_t portno);
/*
 * Module-wide XSEG setup: initialize the xseg library, parse the 'spec'
 * module parameter into xsegbd.config, and join the shared segment.
 * NOTE(review): error-return paths and the trailing 'return' are on lines
 * elided from this extract; strncpy below may leave xsegbd.name without a
 * NUL terminator if 'name' fills XSEGBD_SEGMENT_NAMELEN exactly.
 */
93 int xsegbd_xseg_init(void)
98 strncpy(xsegbd.name, name, XSEGBD_SEGMENT_NAMELEN);
100 r = xseg_initialize();
102 XSEGLOG("cannot initialize 'segdev' peer");
106 r = xseg_parse_spec(spec, &xsegbd.config);
/* only the in-kernel 'segdev' segment type is expected here */
110 if (strncmp(xsegbd.config.type, "segdev", 16))
111 XSEGLOG("WARNING: unexpected segment type '%s' vs 'segdev'",
114 /* leave it here for now */
115 XSEGLOG("joining segment");
116 xsegbd.xseg = xseg_join( xsegbd.config.type,
121 XSEGLOG("cannot find segment");
/*
 * Module-wide XSEG teardown: release the segdev reservation and unmap the
 * shared segment through the segment type's own unmap op.
 * NOTE(review): segdev_get(0) return value is not checked on the visible
 * lines, and the matching segdev_put / return are elided from this extract.
 */
132 int xsegbd_xseg_quit(void)
134 struct segdev *segdev;
136 /* make sure to unmap the segment first */
137 segdev = segdev_get(0);
138 clear_bit(SEGDEV_RESERVED, &segdev->flags);
139 xsegbd.xseg->priv->segment_type.ops.unmap(xsegbd.xseg, xsegbd.xseg->segment_size);
146 /* ***************************** */
147 /* ** Block Device Operations ** */
148 /* ***************************** */
/* block_device_operations.open: pin the backing device object for the
 * lifetime of the open file; released again in xsegbd_release(). */
150 static int xsegbd_open(struct block_device *bdev, fmode_t mode)
152 struct gendisk *disk = bdev->bd_disk;
153 struct xsegbd_device *xsegbd_dev = disk->private_data;
155 xsegbd_get_dev(xsegbd_dev);
/* block_device_operations.release: drop the reference taken in open(). */
160 static int xsegbd_release(struct gendisk *gd, fmode_t mode)
162 struct xsegbd_device *xsegbd_dev = gd->private_data;
164 xsegbd_put_dev(xsegbd_dev);
/* block_device_operations.ioctl: no device-specific ioctls are supported;
 * the (elided) body presumably just returns -ENOTTY or similar. */
169 static int xsegbd_ioctl(struct block_device *bdev, fmode_t mode,
170 unsigned int cmd, unsigned long arg)
/* Block-device entry points registered on each gendisk.
 * NOTE(review): the .open initializer line is elided from this extract. */
175 static const struct block_device_operations xsegbd_ops = {
176 .owner = THIS_MODULE,
178 .release = xsegbd_release,
179 .ioctl = xsegbd_ioctl
183 /* *************************** */
184 /* ** Device Initialization ** */
185 /* *************************** */
187 static void xseg_request_fn(struct request_queue *rq);
188 static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev);
189 static int xsegbd_mapclose(struct xsegbd_device *xsegbd_dev);
/*
 * Per-device block-layer setup: create the request queue driven by
 * xseg_request_fn, configure its limits around XSEGBD_MAX_REQUEST_SIZE,
 * allocate and populate the gendisk, size it, and activate it.
 * Returns 0 on success; error unwinding (and several checks) are on lines
 * elided from this extract — per the comment below, failure cleanup is
 * deferred to xsegbd_dev_release().
 */
191 static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
194 struct gendisk *disk;
195 unsigned int max_request_size_bytes;
197 spin_lock_init(&xsegbd_dev->rqlock);
199 xsegbd_dev->xsegbd = &xsegbd;
201 /* allocates and initializes queue */
202 xsegbd_dev->blk_queue = blk_init_queue(xseg_request_fn, &xsegbd_dev->rqlock);
203 if (!xsegbd_dev->blk_queue)
206 xsegbd_dev->blk_queue->queuedata = xsegbd_dev;
/* advertise FLUSH/FUA support; forwarded to the peer as XF_FLUSH/XF_FUA */
208 blk_queue_flush(xsegbd_dev->blk_queue, REQ_FLUSH | REQ_FUA);
209 blk_queue_logical_block_size(xsegbd_dev->blk_queue, 512);
210 blk_queue_physical_block_size(xsegbd_dev->blk_queue, blksize);
211 blk_queue_bounce_limit(xsegbd_dev->blk_queue, BLK_BOUNCE_ANY);
/* cap a single request at XSEGBD_MAX_REQUEST_SIZE (>> 9: bytes to sectors) */
214 max_request_size_bytes = XSEGBD_MAX_REQUEST_SIZE;
215 blk_queue_max_hw_sectors(xsegbd_dev->blk_queue, max_request_size_bytes >> 9);
216 // blk_queue_max_sectors(xsegbd_dev->blk_queue, max_request_size_bytes >> 10);
217 blk_queue_max_segments(xsegbd_dev->blk_queue, 1024);
218 blk_queue_max_segment_size(xsegbd_dev->blk_queue, max_request_size_bytes);
219 blk_queue_io_min(xsegbd_dev->blk_queue, max_request_size_bytes);
220 blk_queue_io_opt(xsegbd_dev->blk_queue, max_request_size_bytes);
/* network-backed: no rotational penalty, so disable elevator heuristics */
222 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xsegbd_dev->blk_queue);
224 /* vkoukis says we don't need partitions */
225 xsegbd_dev->gd = disk = alloc_disk(1);
229 disk->major = xsegbd_dev->major;
230 disk->first_minor = 0; // id * XSEGBD_MINORS;
231 disk->fops = &xsegbd_ops;
232 disk->queue = xsegbd_dev->blk_queue;
233 disk->private_data = xsegbd_dev;
234 disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
235 snprintf(disk->disk_name, 32, "xsegbd%u", xsegbd_dev->id);
239 /* allow a non-zero sector_size parameter to override the disk size */
/* otherwise the size is fetched synchronously from the mapper peer */
241 xsegbd_dev->sectors = sector_size;
243 ret = xsegbd_get_size(xsegbd_dev);
248 set_capacity(disk, xsegbd_dev->sectors);
249 XSEGLOG("xsegbd active...");
250 add_disk(disk); /* immediately activates the device */
253 /* on error, everything is cleaned up in xsegbd_dev_release */
/*
 * Device-model release callback: runs when the last reference on
 * xsegbd_dev->dev is dropped. Tears down everything xsegbd_add/_dev_init
 * built, in reverse order: gendisk, mapper close, device-table slot,
 * local signal, request queue, xseg membership, blkdev major, pending-
 * request bookkeeping, and finally the module reference.
 */
257 static void xsegbd_dev_release(struct device *dev)
259 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
262 /* cleanup gendisk and blk_queue the right way */
263 if (xsegbd_dev->gd) {
264 if (xsegbd_dev->gd->flags & GENHD_FL_UP)
265 del_gendisk(xsegbd_dev->gd);
/* tell the mapper peer we are done with this target */
267 xsegbd_mapclose(xsegbd_dev);
/* unpublish from the port->device table so xseg_callback can't find us */
270 spin_lock(&xsegbd_devices_lock);
271 BUG_ON(xsegbd_devices[xsegbd_dev->src_portno] != xsegbd_dev);
272 xsegbd_devices[xsegbd_dev->src_portno] = NULL;
273 spin_unlock(&xsegbd_devices_lock);
275 XSEGLOG("releasing id: %d", xsegbd_dev->id);
276 // xseg_cancel_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
277 xseg_quit_local_signal(xsegbd_dev->xseg, xsegbd_dev->src_portno);
279 if (xsegbd_dev->blk_queue)
280 blk_cleanup_queue(xsegbd_dev->blk_queue);
/* NOTE(review): a 'if (xsegbd_dev->gd)' guard for put_disk appears to be
 * on an elided line; put_disk(NULL) would be unsafe otherwise. */
282 put_disk(xsegbd_dev->gd);
284 // if (xseg_free_requests(xsegbd_dev->xseg,
285 // xsegbd_dev->src_portno, xsegbd_dev->nr_requests) < 0)
286 // XSEGLOG("Error trying to free requests!\n");
288 if (xsegbd_dev->xseg){
289 xseg_leave(xsegbd_dev->xseg);
290 xsegbd_dev->xseg = NULL;
293 unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
295 if (xsegbd_dev->blk_req_pending){
296 kfree(xsegbd_dev->blk_req_pending);
297 xsegbd_dev->blk_req_pending = NULL;
299 xq_free(&xsegbd_dev->blk_queue_pending);
/* balances try_module_get() in xsegbd_add(); kfree of xsegbd_dev itself
 * is presumably on an elided line — verify against the full source */
301 module_put(THIS_MODULE);
304 /* ******************* */
305 /* ** Critical Path ** */
306 /* ******************* */
308 static void blk_to_xseg(struct xseg *xseg, struct xseg_request *xreq,
309 struct request *blkreq)
311 struct bio_vec *bvec;
312 struct req_iterator iter;
314 char *data = xseg_get_data(xseg, xreq);
315 rq_for_each_segment(bvec, blkreq, iter) {
316 char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
317 memcpy(data + off, bdata, bvec->bv_len);
319 kunmap_atomic(bdata);
/*
 * Mirror of blk_to_xseg(): copy read data from the xseg request buffer
 * back into the block request's bio segments.
 * NOTE(review): same elided 'off' bookkeeping and the same
 * kunmap_atomic(base+offset) quirk as blk_to_xseg().
 */
323 static void xseg_to_blk(struct xseg *xseg, struct xseg_request *xreq,
324 struct request *blkreq)
326 struct bio_vec *bvec;
327 struct req_iterator iter;
329 char *data = xseg_get_data(xseg, xreq);
330 rq_for_each_segment(bvec, blkreq, iter) {
331 char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
332 memcpy(bdata, data + off, bvec->bv_len);
334 kunmap_atomic(bdata);
/*
 * Request-queue strategy function (called with rqlock held by the block
 * layer). Per iteration (loop construct itself elided): allocate an xseg
 * request and a pending-slot index, fetch the next block request, prep and
 * fill the xseg request (target, offset, size, FLUSH/FUA flags, write
 * payload), submit it to the destination port and signal the peer.
 * Completion is asynchronous via xseg_callback(). The function drops
 * rqlock while talking to xseg and retakes it before returning/looping.
 * The tail below is the error-unwind path: put the xseg request, return
 * the pending slot, reacquire the lock.
 * NOTE(review): heavily elided — loop header, several error branches,
 * and 'goto' labels are not visible; treat line pairing as approximate.
 */
338 static void xseg_request_fn(struct request_queue *rq)
340 struct xseg_request *xreq;
341 struct xsegbd_device *xsegbd_dev = rq->queuedata;
342 struct request *blkreq;
343 struct xsegbd_pending *pending;
/* block layer entered with rqlock held; release before blocking-ish work */
351 spin_unlock_irq(&xsegbd_dev->rqlock);
/* debug tracing: this path must not run with preemption/irqs off */
353 if (current_thread_info()->preempt_count || irqs_disabled()){
354 XSEGLOG("Current thread preempt_count: %d, irqs_disabled(): %lu ",
355 current_thread_info()->preempt_count, irqs_disabled());
357 //XSEGLOG("Priority: %d", current_thread_info()->task->prio);
358 //XSEGLOG("Static priority: %d", current_thread_info()->task->static_prio);
359 //XSEGLOG("Normal priority: %d", current_thread_info()->task->normal_prio);
360 //XSEGLOG("Rt_priority: %u", current_thread_info()->task->rt_priority);
361 blkreq_idx = Noneidx;
362 xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno,
363 xsegbd_dev->dst_portno, X_ALLOC);
/* reserve a slot in blk_req_pending to track this in-flight request */
367 blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending,
368 xsegbd_dev->src_portno);
369 if (blkreq_idx == Noneidx)
372 if (blkreq_idx >= xsegbd_dev->nr_requests) {
373 XSEGLOG("blkreq_idx >= xsegbd_dev->nr_requests");
/* retake the queue lock only around blk_fetch_request */
379 spin_lock_irqsave(&xsegbd_dev->rqlock, flags);
380 blkreq = blk_fetch_request(rq);
382 spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
/* only filesystem (REQ_TYPE_FS) requests are serviced */
386 if (blkreq->cmd_type != REQ_TYPE_FS) {
387 //FIXME we lose xreq here
388 XSEGLOG("non-fs cmd_type: %u. *shrug*", blkreq->cmd_type);
389 __blk_end_request_all(blkreq, 0);
390 spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
393 spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
394 if (current_thread_info()->preempt_count || irqs_disabled()){
395 XSEGLOG("Current thread preempt_count: %d, irqs_disabled(): %lu ",
396 current_thread_info()->preempt_count, irqs_disabled());
399 datalen = blk_rq_bytes(blkreq);
400 r = xseg_prep_request(xsegbd_dev->xseg, xreq,
401 xsegbd_dev->targetlen, datalen);
403 XSEGLOG("couldn't prep request");
404 blk_end_request_err(blkreq, r);
409 if (xreq->bufferlen - xsegbd_dev->targetlen < datalen){
410 XSEGLOG("malformed req buffers");
411 blk_end_request_err(blkreq, r);
/* target name is not NUL-terminated in the xseg buffer; length-prefixed */
416 target = xseg_get_target(xsegbd_dev->xseg, xreq);
417 strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
419 pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
420 pending->dev = xsegbd_dev;
421 pending->request = blkreq;
422 pending->comp = NULL;
424 xreq->size = datalen;
/* << 9: sector index to byte offset */
425 xreq->offset = blk_rq_pos(blkreq) << 9;
/* priv carries the pending-slot index back to us in xseg_callback() */
426 xreq->priv = (uint64_t) blkreq_idx;
429 if (xreq->offset >= (sector_size << 9))
430 XSEGLOG("sector offset: %lu > %lu, flush:%u, fua:%u",
431 blk_rq_pos(blkreq), sector_size,
432 blkreq->cmd_flags & REQ_FLUSH,
433 blkreq->cmd_flags & REQ_FUA);
/* propagate barrier semantics to the storage peer */
436 if (blkreq->cmd_flags & REQ_FLUSH)
437 xreq->flags |= XF_FLUSH;
439 if (blkreq->cmd_flags & REQ_FUA)
440 xreq->flags |= XF_FUA;
/* writes copy their payload in now; reads are filled on completion */
442 if (rq_data_dir(blkreq)) {
443 blk_to_xseg(xsegbd_dev->xseg, xreq, blkreq);
450 // XSEGLOG("%s : %lu (%lu)", xsegbd_dev->target, xreq->offset, xreq->datalen);
452 p = xseg_submit(xsegbd_dev->xseg, xreq,
453 xsegbd_dev->src_portno, X_ALLOC);
/* NOTE(review): typo in runtime log string ("coundn't") — left as-is;
 * fixing it would change runtime output in a doc-only pass */
455 XSEGLOG("coundn't submit req");
457 blk_end_request_err(blkreq, r);
460 WARN_ON(xseg_signal(xsegbd_dev->xsegbd->xseg, p) < 0);
/* error unwind: release the xseg request and return the pending slot */
463 BUG_ON(xseg_put_request(xsegbd_dev->xsegbd->xseg, xreq,
464 xsegbd_dev->src_portno) == -1);
465 if (blkreq_idx != Noneidx)
466 BUG_ON(xq_append_head(&xsegbd_dev->blk_queue_pending,
467 blkreq_idx, xsegbd_dev->src_portno) == Noneidx);
468 spin_lock_irq(&xsegbd_dev->rqlock);
/*
 * Parse a completed "get size" reply: validate the request state and its
 * data buffer, then record the size (first uint64_t of the reply data,
 * in bytes) as a 512-byte sector count in xsegbd_dev->sectors.
 * NOTE(review): the error 'return' statements after each XSEGLOG and the
 * success return are on elided lines.
 */
471 int update_dev_sectors_from_request( struct xsegbd_device *xsegbd_dev,
472 struct xseg_request *xreq )
476 XSEGLOG("Invalid xreq");
480 if (xreq->state & XS_FAILED)
483 if (!(xreq->state & XS_SERVED))
486 data = xseg_get_data(xsegbd_dev->xseg, xreq);
488 XSEGLOG("Invalid req data");
492 XSEGLOG("Invalid xsegbd_dev");
495 xsegbd_dev->sectors = *((uint64_t *) data) / 512ULL;
/*
 * Synchronously ask the mapper peer for the target's size. Builds an
 * xseg request whose reply is sizeof(struct xseg_reply_info), parks a
 * completion in the pending slot so xseg_callback() can wake us, submits,
 * waits, then extracts the size via update_dev_sectors_from_request().
 * Returns 0 on success / negative on error (the error paths themselves
 * are on elided lines). Pending slot and xseg request are released on
 * the tail path below.
 * NOTE(review): wait_for_completion_interruptible()'s return value is
 * not checked on the visible lines — a signal would let us proceed with
 * an unfinished request; verify against the full source.
 */
499 static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev)
501 struct xseg_request *xreq;
504 struct xsegbd_pending *pending;
505 struct completion comp;
509 xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno,
510 xsegbd_dev->dst_portno, X_ALLOC);
514 BUG_ON(xseg_prep_request(xsegbd_dev->xseg, xreq, xsegbd_dev->targetlen,
515 sizeof(struct xseg_reply_info)));
517 init_completion(&comp);
518 blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending, 1);
519 if (blkreq_idx == Noneidx)
522 pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
523 pending->dev = xsegbd_dev;
/* request == NULL + comp != NULL marks a synchronous (blocking) waiter */
524 pending->request = NULL;
525 pending->comp = &comp;
528 xreq->priv = (uint64_t) blkreq_idx;
530 target = xseg_get_target(xsegbd_dev->xseg, xreq);
531 strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
532 xreq->size = xreq->datalen;
/* NOTE(review): the line setting xreq->op (presumably X_INFO) is elided */
536 xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
537 p = xseg_submit(xsegbd_dev->xseg, xreq,
538 xsegbd_dev->src_portno, X_ALLOC);
540 XSEGLOG("couldn't submit request");
544 WARN_ON(xseg_signal(xsegbd_dev->xseg, p) < 0);
545 XSEGLOG("Before wait for completion, comp %lx [%llu]", (unsigned long) pending->comp, (unsigned long long) blkreq_idx);
546 wait_for_completion_interruptible(&comp);
547 XSEGLOG("Woken up after wait_for_completion_interruptible(), comp: %lx [%llu]", (unsigned long) pending->comp, (unsigned long long) blkreq_idx);
548 ret = update_dev_sectors_from_request(xsegbd_dev, xreq);
549 XSEGLOG("get_size: sectors = %ld\n", (long)xsegbd_dev->sectors);
551 BUG_ON(xseg_put_request(xsegbd_dev->xseg, xreq, xsegbd_dev->src_portno) == -1);
/* release the pending slot for reuse */
557 pending->comp = NULL;
558 xq_append_head(&xsegbd_dev->blk_queue_pending, blkreq_idx, 1);
/*
 * Synchronously tell the mapper peer to close this target (called from
 * xsegbd_dev_release()). Structure mirrors xsegbd_get_size(): zero-length
 * request, completion parked in a pending slot, submit + signal, wait,
 * then release slot and request. Failure is logged but not fatal.
 * NOTE(review): same elided error paths, elided xreq->op assignment
 * (presumably X_CLOSE), and unchecked interruptible wait as get_size.
 */
563 static int xsegbd_mapclose(struct xsegbd_device *xsegbd_dev)
565 struct xseg_request *xreq;
568 struct xsegbd_pending *pending;
569 struct completion comp;
573 xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno,
574 xsegbd_dev->dst_portno, X_ALLOC);
578 BUG_ON(xseg_prep_request(xsegbd_dev->xseg, xreq, xsegbd_dev->targetlen, 0));
580 init_completion(&comp);
581 blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending, 1);
582 if (blkreq_idx == Noneidx)
585 pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
586 pending->dev = xsegbd_dev;
587 pending->request = NULL;
588 pending->comp = &comp;
591 xreq->priv = (uint64_t) blkreq_idx;
593 target = xseg_get_target(xsegbd_dev->xseg, xreq);
594 strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
595 xreq->size = xreq->datalen;
599 xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
600 p = xseg_submit(xsegbd_dev->xseg, xreq,
601 xsegbd_dev->src_portno, X_ALLOC);
603 XSEGLOG("couldn't submit request");
607 WARN_ON(xseg_signal(xsegbd_dev->xseg, p) < 0);
608 wait_for_completion_interruptible(&comp);
610 if (xreq->state & XS_FAILED)
611 XSEGLOG("Couldn't close disk on mapper");
613 BUG_ON(xseg_put_request(xsegbd_dev->xseg, xreq, xsegbd_dev->src_portno) == -1);
619 pending->comp = NULL;
620 xq_append_head(&xsegbd_dev->blk_queue_pending, blkreq_idx, 1);
/*
 * Completion handler, invoked when our port is signalled. Drains replies
 * with xseg_receive(): a reply whose pending slot has a completion wakes
 * the synchronous waiter (who owns the request); otherwise it is a block
 * I/O completion — copy read data back, end the block request, recycle the
 * pending slot and the xseg request. Finally rekicks xseg_request_fn()
 * under rqlock to push any queued block requests.
 * NOTE(review): the receive loop construct, 'err' computation, and several
 * error 'continue'/'goto' lines are elided from this extract.
 */
625 static void xseg_callback(xport portno)
627 struct xsegbd_device *xsegbd_dev;
628 struct xseg_request *xreq;
629 struct request *blkreq;
630 struct xsegbd_pending *pending;
632 xqindex blkreq_idx, ridx;
635 xsegbd_dev = __xsegbd_get_dev(portno);
637 XSEGLOG("portno: %u has no xsegbd device assigned", portno);
643 xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
644 xreq = xseg_receive(xsegbd_dev->xseg, portno, 0);
648 // xseg_cancel_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
/* priv round-trips the pending-slot index set in the submit paths */
650 blkreq_idx = (xqindex) xreq->priv;
651 if (blkreq_idx >= xsegbd_dev->nr_requests) {
653 //FIXME maybe put request?
657 pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
659 /* someone is blocking on this request
660 and will handle it when we wake them up. */
661 complete(pending->comp);
662 /* the request is blocker's responsibility so
663 we will not put_request(); */
667 /* this is now treated as a block I/O request to end */
668 blkreq = pending->request;
669 pending->request = NULL;
670 if (xsegbd_dev != pending->dev) {
671 //FIXME maybe put request?
672 XSEGLOG("xsegbd_dev != pending->dev");
678 //FIXME maybe put request?
679 XSEGLOG("blkreq does not exist");
/* a served request must also have transferred the full byte count */
685 if (!(xreq->state & XS_SERVED))
688 if (xreq->serviced != blk_rq_bytes(blkreq))
/* reads: copy reply payload back into the bio segments */
692 if (!rq_data_dir(blkreq)){
693 xseg_to_blk(xsegbd_dev->xseg, xreq, blkreq);
696 blk_end_request_all(blkreq, err);
698 ridx = xq_append_head(&xsegbd_dev->blk_queue_pending,
699 blkreq_idx, xsegbd_dev->src_portno);
700 if (ridx == Noneidx) {
701 XSEGLOG("couldnt append blkreq_idx");
705 if (xseg_put_request(xsegbd_dev->xseg, xreq,
706 xsegbd_dev->src_portno) < 0){
707 XSEGLOG("couldn't put req");
/* resources freed up: let the strategy function issue more requests */
712 spin_lock_irqsave(&xsegbd_dev->rqlock, flags);
713 xseg_request_fn(xsegbd_dev->blk_queue);
714 spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
719 /* sysfs interface */
/* Virtual bus for xsegbd devices; bus attrs (add/remove) attached in
 * xsegbd_sysfs_init(). The .name initializer line is elided here. */
721 static struct bus_type xsegbd_bus_type = {
/* sysfs 'size' (RO): device size in bytes (sectors * 512). */
725 static ssize_t xsegbd_size_show(struct device *dev,
726 struct device_attribute *attr, char *buf)
728 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
730 return sprintf(buf, "%llu\n", (unsigned long long) xsegbd_dev->sectors * 512ULL);
/* sysfs 'major' (RO): block-device major number assigned at add time. */
733 static ssize_t xsegbd_major_show(struct device *dev,
734 struct device_attribute *attr, char *buf)
736 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
738 return sprintf(buf, "%d\n", xsegbd_dev->major);
/* sysfs 'srcport' (RO): xseg port this device is bound to. */
741 static ssize_t xsegbd_srcport_show(struct device *dev,
742 struct device_attribute *attr, char *buf)
744 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
746 return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->src_portno);
/* sysfs 'dstport' (RO): xseg port of the peer servicing our requests. */
749 static ssize_t xsegbd_dstport_show(struct device *dev,
750 struct device_attribute *attr, char *buf)
752 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
754 return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->dst_portno);
/* sysfs 'id' (RO): device id (equal to src_portno, set in xsegbd_add). */
757 static ssize_t xsegbd_id_show(struct device *dev,
758 struct device_attribute *attr, char *buf)
760 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
762 return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->id);
/* sysfs 'reqs' (RO): number of concurrent requests configured at add. */
765 static ssize_t xsegbd_reqs_show(struct device *dev,
766 struct device_attribute *attr, char *buf)
768 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
770 return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->nr_requests);
/* sysfs 'target' (RO): target (volume) name this device maps to. */
773 static ssize_t xsegbd_target_show(struct device *dev,
774 struct device_attribute *attr, char *buf)
776 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
778 return sprintf(buf, "%s\n", xsegbd_dev->target);
/*
 * sysfs 'refresh' (WO): re-query the target's size from the mapper and
 * update the gendisk capacity, serialized on xsegbd_mutex.
 * NOTE(review): the buf/count parameters, error handling after
 * xsegbd_get_size(), and the return statement are on elided lines.
 */
781 static ssize_t xsegbd_image_refresh(struct device *dev,
782 struct device_attribute *attr,
786 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
789 mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
791 rc = xsegbd_get_size(xsegbd_dev);
797 set_capacity(xsegbd_dev->gd, xsegbd_dev->sectors);
800 mutex_unlock(&xsegbd_mutex);
805 //maybe try callback, first and then do a more invasive cleanup
/*
 * sysfs 'cleanup' (WO): forcibly fail every in-flight request. Walks all
 * pending slots under the pending-queue xlock; any slot not currently in
 * the free queue is ended with -EIO (block requests) or completed
 * (synchronous waiters), then returned to the free queue.
 * Invasive last-resort recovery — see the comment above the definition.
 * NOTE(review): the complete(comp) call for waiters and the return
 * statement are on elided lines.
 */
806 static ssize_t xsegbd_cleanup(struct device *dev,
807 struct device_attribute *attr,
811 struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
813 struct request *blkreq = NULL;
814 struct xsegbd_pending *pending = NULL;
815 struct completion *comp = NULL;
817 mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
818 xlock_acquire(&xsegbd_dev->blk_queue_pending.lock,
819 xsegbd_dev->src_portno);
820 for (i = 0; i < xsegbd_dev->nr_requests; i++) {
/* __xq_check: slot i absent from the free queue => request in flight */
821 if (!__xq_check(&xsegbd_dev->blk_queue_pending, i)) {
822 pending = &xsegbd_dev->blk_req_pending[i];
823 blkreq = pending->request;
824 pending->request = NULL;
825 comp = pending->comp;
826 pending->comp = NULL;
828 XSEGLOG("Cleaning up blkreq %lx [%d]", (unsigned long) blkreq, i);
829 blk_end_request_all(blkreq, -EIO);
832 XSEGLOG("Cleaning up comp %lx [%d]", (unsigned long) comp, i);
835 __xq_append_tail(&xsegbd_dev->blk_queue_pending, i);
838 xlock_release(&xsegbd_dev->blk_queue_pending.lock);
840 mutex_unlock(&xsegbd_mutex);
/* Per-device sysfs attributes: read-only status files plus the two
 * write-only action triggers (refresh, cleanup). */
844 static DEVICE_ATTR(size, S_IRUGO, xsegbd_size_show, NULL);
845 static DEVICE_ATTR(major, S_IRUGO, xsegbd_major_show, NULL);
846 static DEVICE_ATTR(srcport, S_IRUGO, xsegbd_srcport_show, NULL);
847 static DEVICE_ATTR(dstport, S_IRUGO, xsegbd_dstport_show, NULL);
848 static DEVICE_ATTR(id , S_IRUGO, xsegbd_id_show, NULL);
849 static DEVICE_ATTR(reqs , S_IRUGO, xsegbd_reqs_show, NULL);
850 static DEVICE_ATTR(target, S_IRUGO, xsegbd_target_show, NULL);
851 static DEVICE_ATTR(refresh , S_IWUSR, NULL, xsegbd_image_refresh);
852 static DEVICE_ATTR(cleanup , S_IWUSR, NULL, xsegbd_cleanup);
/* Attribute group wiring for the device type. NOTE(review): the entries
 * for size/id/reqs, the NULL terminators, and the groups[] contents are
 * on lines elided from this extract. */
854 static struct attribute *xsegbd_attrs[] = {
856 &dev_attr_major.attr,
857 &dev_attr_srcport.attr,
858 &dev_attr_dstport.attr,
861 &dev_attr_target.attr,
862 &dev_attr_refresh.attr,
863 &dev_attr_cleanup.attr,
867 static struct attribute_group xsegbd_attr_group = {
868 .attrs = xsegbd_attrs,
871 static const struct attribute_group *xsegbd_attr_groups[] = {
/* Empty release for the device type (real cleanup is dev->release,
 * i.e. xsegbd_dev_release, set in xsegbd_bus_add_dev). */
876 static void xsegbd_sysfs_dev_release(struct device *dev)
880 static struct device_type xsegbd_device_type = {
882 .groups = xsegbd_attr_groups,
883 .release = xsegbd_sysfs_dev_release,
/* Root device all xsegbd devices hang off ("/sys/devices/xsegbd"). */
886 static void xsegbd_root_dev_release(struct device *dev)
890 static struct device xsegbd_root_dev = {
891 .init_name = "xsegbd",
892 .release = xsegbd_root_dev_release,
/*
 * Register @xsegbd_dev with the driver core under xsegbd_root_dev on the
 * xsegbd bus, named after its numeric id. device_register() initializes
 * the refcount whose final put invokes xsegbd_dev_release().
 */
895 static int xsegbd_bus_add_dev(struct xsegbd_device *xsegbd_dev)
900 mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
901 dev = &xsegbd_dev->dev;
903 dev->bus = &xsegbd_bus_type;
904 dev->type = &xsegbd_device_type;
905 dev->parent = &xsegbd_root_dev;
906 dev->release = xsegbd_dev_release;
907 dev_set_name(dev, "%d", xsegbd_dev->id);
909 ret = device_register(dev);
911 mutex_unlock(&xsegbd_mutex);
/* Unregister from the driver core; the final reference drop runs
 * xsegbd_dev_release() and tears the device down. */
915 static void xsegbd_bus_del_dev(struct xsegbd_device *xsegbd_dev)
917 device_unregister(&xsegbd_dev->dev);
/*
 * Bus 'add' attribute: create a device from "target src:dst:nr_requests".
 * Sequence: take a module ref, allocate the device, parse input, claim
 * the src-port slot in xsegbd_devices[], register a blkdev major, add the
 * sysfs device, allocate the pending-request queue and array, join the
 * segment, bind and validate the source port, init local signalling, and
 * finally bring up the block device via xsegbd_dev_init().
 * The tail lines are the (goto-label) unwind path in reverse order.
 * NOTE(review): heavily elided — error checks after most calls, goto
 * labels, kfree of xsegbd_dev, and the return statements are not visible.
 */
920 static ssize_t xsegbd_add(struct bus_type *bus, const char *buf, size_t count)
922 struct xsegbd_device *xsegbd_dev;
923 struct xseg_port *port;
924 ssize_t ret = -ENOMEM;
/* hold the module while a device exists; dropped in xsegbd_dev_release */
926 if (!try_module_get(THIS_MODULE))
929 xsegbd_dev = kzalloc(sizeof(*xsegbd_dev), GFP_KERNEL);
933 spin_lock_init(&xsegbd_dev->rqlock);
934 INIT_LIST_HEAD(&xsegbd_dev->node);
/* expected format: "<target-name> <src>:<dst>:<nr_requests>" */
937 if (sscanf(buf, "%" __stringify(XSEGBD_TARGET_NAMELEN) "s "
938 "%d:%d:%d", xsegbd_dev->target, &xsegbd_dev->src_portno,
939 &xsegbd_dev->dst_portno, &xsegbd_dev->nr_requests) < 3) {
943 xsegbd_dev->targetlen = strlen(xsegbd_dev->target);
/* one device per source port: claim the slot or bail out */
945 spin_lock(&xsegbd_devices_lock);
946 if (xsegbd_devices[xsegbd_dev->src_portno] != NULL) {
950 xsegbd_devices[xsegbd_dev->src_portno] = xsegbd_dev;
951 xsegbd_dev->id = xsegbd_dev->src_portno;
952 spin_unlock(&xsegbd_devices_lock);
/* major==0 asks the kernel to pick one; register_blkdev returns it */
954 XSEGLOG("registering block device major %d", major);
955 ret = register_blkdev(major, XSEGBD_NAME);
957 XSEGLOG("cannot register block device!");
961 xsegbd_dev->major = ret;
962 XSEGLOG("registered block device major %d", xsegbd_dev->major);
964 ret = xsegbd_bus_add_dev(xsegbd_dev);
968 if (!xq_alloc_seq(&xsegbd_dev->blk_queue_pending,
969 xsegbd_dev->nr_requests,
970 xsegbd_dev->nr_requests))
973 xsegbd_dev->blk_req_pending = kzalloc(
974 xsegbd_dev->nr_requests *sizeof(struct xsegbd_pending),
976 if (!xsegbd_dev->blk_req_pending)
980 XSEGLOG("joining segment");
981 //FIXME use xsebd module config for now
982 xsegbd_dev->xseg = xseg_join( xsegbd.config.type,
986 if (!xsegbd_dev->xseg)
989 XSEGLOG("%s binding to source port %u (destination %u)", xsegbd_dev->target,
990 xsegbd_dev->src_portno, xsegbd_dev->dst_portno);
991 port = xseg_bind_port(xsegbd_dev->xseg, xsegbd_dev->src_portno, NULL);
993 XSEGLOG("cannot bind to port");
999 if (xsegbd_dev->src_portno != xseg_portno(xsegbd_dev->xseg, port)) {
1000 XSEGLOG("portno != xsegbd_dev->src_portno");
1005 xseg_init_local_signal(xsegbd_dev->xseg, xsegbd_dev->src_portno);
1008 /* make sure we don't get any requests until we're ready to handle them */
1009 xseg_cancel_wait(xsegbd_dev->xseg, xseg_portno(xsegbd_dev->xseg, port));
1011 ret = xsegbd_dev_init(xsegbd_dev);
1015 xseg_prepare_wait(xsegbd_dev->xseg, xseg_portno(xsegbd_dev->xseg, port));
/* ---- error unwind (label lines elided) ---- */
1019 xsegbd_bus_del_dev(xsegbd_dev);
1023 unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
1026 spin_lock(&xsegbd_devices_lock);
1027 xsegbd_devices[xsegbd_dev->src_portno] = NULL;
1030 spin_unlock(&xsegbd_devices_lock);
/*
 * Bus 'remove' attribute: parse a decimal device id from @buf, look up
 * the device, and unregister it (final put triggers full teardown in
 * xsegbd_dev_release). Serialized on xsegbd_mutex.
 * NOTE(review): the strtoul error check, id range validation, the
 * not-found branch, and the return are on elided lines.
 */
1039 static ssize_t xsegbd_remove(struct bus_type *bus, const char *buf, size_t count)
1041 struct xsegbd_device *xsegbd_dev = NULL;
1043 unsigned long ul_id;
1045 ret = strict_strtoul(buf, 10, &ul_id);
1053 mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
1056 xsegbd_dev = __xsegbd_get_dev(id);
1061 xsegbd_bus_del_dev(xsegbd_dev);
1064 mutex_unlock(&xsegbd_mutex);
/* Bus-level control files: /sys/bus/xsegbd/{add,remove} (write-only).
 * The __ATTR_NULL terminator line is elided from this extract. */
1068 static struct bus_attribute xsegbd_bus_attrs[] = {
1069 __ATTR(add, S_IWUSR, NULL, xsegbd_add),
1070 __ATTR(remove, S_IWUSR, NULL, xsegbd_remove),
/*
 * Register the root device and the xsegbd bus; on bus registration
 * failure the root device is unregistered again. Returns 0 or a negative
 * errno (return lines elided).
 */
1074 static int xsegbd_sysfs_init(void)
1078 ret = device_register(&xsegbd_root_dev);
1082 xsegbd_bus_type.bus_attrs = xsegbd_bus_attrs;
1083 ret = bus_register(&xsegbd_bus_type);
1085 device_unregister(&xsegbd_root_dev);
/* Mirror of xsegbd_sysfs_init(): bus first, then the root device. */
1090 static void xsegbd_sysfs_cleanup(void)
1092 bus_unregister(&xsegbd_bus_type);
1093 device_unregister(&xsegbd_root_dev);
1096 /* *************************** */
1097 /* ** Module Initialization ** */
1098 /* *************************** */
/*
 * Module entry: allocate the per-port device table, initialize XSEG and
 * the sysfs interface. Unwind (kfree below) frees the table on failure.
 * NOTE(review): sizeof(struct xsegbd_devices *) is a typo — the type is
 * 'struct xsegbd_device'; it only works because all object pointers have
 * the same size. 'sizeof *xsegbd_devices' would be the robust spelling,
 * but fixing it here would require the elided error-path lines.
 */
1100 static int __init xsegbd_init(void)
1103 xsegbd_devices = kzalloc(max_dev * sizeof(struct xsegbd_devices *), GFP_KERNEL);
1104 if (!xsegbd_devices)
1107 spin_lock_init(&xsegbd_devices_lock);
1110 ret = xsegbd_xseg_init();
1114 ret = xsegbd_sysfs_init();
1118 XSEGLOG("initialization complete");
/* error unwind (labels elided) */
1127 kfree(xsegbd_devices);
/* Module exit: tear down sysfs (remaining cleanup — xseg_quit, freeing
 * xsegbd_devices — is presumably on elided lines; verify). */
1132 static void __exit xsegbd_exit(void)
1134 xsegbd_sysfs_cleanup();
1138 module_init(xsegbd_init);
1139 module_exit(xsegbd_exit);