fix xseg request preparation bug
[archipelago] / xseg / peers / kernel / xsegbd.c
1 /* xsegbd.c
2  *
3  */
4
5 #include <linux/module.h>
6 #include <linux/moduleparam.h>
7 #include <linux/init.h>
8 #include <linux/sched.h>
9 #include <linux/kernel.h>
10 #include <linux/slab.h>
11 #include <linux/fs.h>
12 #include <linux/errno.h>
13 #include <linux/timer.h>
14 #include <linux/types.h>
15 #include <linux/vmalloc.h>
16 #include <linux/genhd.h>
17 #include <linux/blkdev.h>
18 #include <linux/bio.h>
19 #include <linux/device.h>
20 #include <linux/completion.h>
21
22 #include <sys/kernel/segdev.h>
23 #include "xsegbd.h"
24
25 #define XSEGBD_MINORS 1
26 /* define max request size to be used in xsegbd */
27 //FIXME should we make this 4MB instead of 256KB ?
28 #define XSEGBD_MAX_REQUEST_SIZE 262144U
29
30 MODULE_DESCRIPTION("xsegbd");
31 MODULE_AUTHOR("XSEG");
32 MODULE_LICENSE("GPL");
33
34 static long sector_size = 0;
35 static long blksize = 512;
36 static int major = 0;
37 static int max_dev = 1024;
38 static char name[XSEGBD_SEGMENT_NAMELEN] = "xsegbd";
39 static char spec[256] = "segdev:xsegbd:4:1024:12";
40
41 module_param(sector_size, long, 0644);
42 module_param(blksize, long, 0644);
43 module_param(max_dev, int, 0644);
44 module_param(major, int, 0644);
45 module_param_string(name, name, sizeof(name), 0644);
46 module_param_string(spec, spec, sizeof(spec), 0644);
47
48 //static spinlock_t __lock;
49 static struct xsegbd xsegbd;
50 static struct xsegbd_device **xsegbd_devices; /* indexed by portno */
51 static DEFINE_MUTEX(xsegbd_mutex);
52 static DEFINE_SPINLOCK(xsegbd_devices_lock);
53
54
55
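/* look up the xsegbd device registered for a port, under the devices lock */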
56 static struct xsegbd_device *__xsegbd_get_dev(unsigned long id)
57 {
58         struct xsegbd_device *xsegbd_dev = NULL;
59
60         spin_lock(&xsegbd_devices_lock);
61         xsegbd_dev = xsegbd_devices[id];
62         spin_unlock(&xsegbd_devices_lock);
63
64         return xsegbd_dev;
65 }
66
67 /* ************************* */
68 /* ***** sysfs helpers ***** */
69 /* ************************* */
70
71 static struct xsegbd_device *dev_to_xsegbd(struct device *dev)
72 {
73         return container_of(dev, struct xsegbd_device, dev);
74 }
75
76 static struct device *xsegbd_get_dev(struct xsegbd_device *xsegbd_dev)
77 {
78         /* FIXME */
79         return get_device(&xsegbd_dev->dev);
80 }
81
82 static void xsegbd_put_dev(struct xsegbd_device *xsegbd_dev)
83 {
84         put_device(&xsegbd_dev->dev);
85 }
86
87 /* ************************* */
88 /* ** XSEG Initialization ** */
89 /* ************************* */
90
91 static void xseg_callback(struct xseg *xseg, xport portno);
92
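/*
 * Join the xseg segment described by the 'spec' and 'name' module
 * parameters. The segment must already exist; we only attach to it,
 * registering xseg_callback() to be run when one of our ports is
 * signalled.
 */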
93 int xsegbd_xseg_init(void)
94 {
95         int r;
96
97         if (!xsegbd.name[0])
98                 strncpy(xsegbd.name, name, XSEGBD_SEGMENT_NAMELEN);
99
100         r = xseg_initialize();
101         if (r) {
102                 XSEGLOG("cannot initialize 'segdev' peer");
103                 goto err;
104         }
105
106         r = xseg_parse_spec(spec, &xsegbd.config);
107         if (r)
108                 goto err;
109
110         if (strncmp(xsegbd.config.type, "segdev", 16))
111                 XSEGLOG("WARNING: unexpected segment type '%s' vs 'segdev'",
112                          xsegbd.config.type);
113
114         /* leave it here for now */
115         XSEGLOG("joining segment");
116         xsegbd.xseg = xseg_join(        xsegbd.config.type,
117                                         xsegbd.config.name,
118                                         "segdev",
119                                         xseg_callback           );
120         if (!xsegbd.xseg) {
121                 XSEGLOG("cannot find segment");
122                 r = -ENODEV;
123                 goto err;
124         }
125
126         return 0;
127 err:
128         return r;
129
130 }
131
132 int xsegbd_xseg_quit(void)
133 {
134         struct segdev *segdev;
135
136         /* make sure to unmap the segment first */
137         segdev = segdev_get(0);
138         clear_bit(SEGDEV_RESERVED, &segdev->flags);
139         xsegbd.xseg->priv->segment_type.ops.unmap(xsegbd.xseg, xsegbd.xseg->segment_size);
140         segdev_put(segdev);
141
142         return 0;
143 }
144
145
146 /* ***************************** */
147 /* ** Block Device Operations ** */
148 /* ***************************** */
149
150 static int xsegbd_open(struct block_device *bdev, fmode_t mode)
151 {
152         struct gendisk *disk = bdev->bd_disk;
153         struct xsegbd_device *xsegbd_dev = disk->private_data;
154
155         xsegbd_get_dev(xsegbd_dev);
156
157         return 0;
158 }
159
160 static int xsegbd_release(struct gendisk *gd, fmode_t mode)
161 {
162         struct xsegbd_device *xsegbd_dev = gd->private_data;
163
164         xsegbd_put_dev(xsegbd_dev);
165
166         return 0;
167 }
168
169 static int xsegbd_ioctl(struct block_device *bdev, fmode_t mode,
170                         unsigned int cmd, unsigned long arg)
171 {
172         return -ENOTTY;
173 }
174
175 static const struct block_device_operations xsegbd_ops = {
176         .owner          = THIS_MODULE,
177         .open           = xsegbd_open,
178         .release        = xsegbd_release,
179         .ioctl          = xsegbd_ioctl 
180 };
181
182
183 /* *************************** */
184 /* ** Device Initialization ** */
185 /* *************************** */
186
187 static void xseg_request_fn(struct request_queue *rq);
188 static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev);
189
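/*
 * Set up the block layer side of a device: allocate and tune the request
 * queue, create the gendisk, determine the size (from the sector_size
 * parameter or an X_INFO request) and activate the disk.
 */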
190 static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
191 {
192         int ret = -ENOMEM;
193         struct gendisk *disk;
194         unsigned int max_request_size_bytes;
195
196         spin_lock_init(&xsegbd_dev->rqlock);
197
198         xsegbd_dev->xsegbd = &xsegbd;
199
200         xsegbd_dev->blk_queue = blk_alloc_queue(GFP_KERNEL);
201         if (!xsegbd_dev->blk_queue)
202                 goto out;
203
204         blk_init_allocated_queue(xsegbd_dev->blk_queue, xseg_request_fn, &xsegbd_dev->rqlock);
205         xsegbd_dev->blk_queue->queuedata = xsegbd_dev;
206
207         blk_queue_flush(xsegbd_dev->blk_queue, REQ_FLUSH | REQ_FUA);
208         blk_queue_logical_block_size(xsegbd_dev->blk_queue, 512);
209         blk_queue_physical_block_size(xsegbd_dev->blk_queue, blksize);
210         blk_queue_bounce_limit(xsegbd_dev->blk_queue, BLK_BOUNCE_ANY);
211         
212         //blk_queue_max_segments(dev->blk_queue, 512);
213
214         max_request_size_bytes = XSEGBD_MAX_REQUEST_SIZE;
215         blk_queue_max_hw_sectors(xsegbd_dev->blk_queue, max_request_size_bytes >> 9);
216         blk_queue_max_segment_size(xsegbd_dev->blk_queue, max_request_size_bytes);
217         blk_queue_io_min(xsegbd_dev->blk_queue, max_request_size_bytes);
218         blk_queue_io_opt(xsegbd_dev->blk_queue, max_request_size_bytes);
219
220         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xsegbd_dev->blk_queue);
221
222         /* vkoukis says we don't need partitions */
223         xsegbd_dev->gd = disk = alloc_disk(1);
224         if (!disk) {
225                 /* FIXME: We call xsegbd_dev_release if something goes wrong, to clean up
226                  * disks/queues/etc.
227                  * Would it be better to do the cleanup here, and conditionally clean up
228                  * in dev_release?
229                  */
230                 goto out;
231         }
232         disk->major = xsegbd_dev->major;
233         disk->first_minor = 0; // id * XSEGBD_MINORS;
234         disk->fops = &xsegbd_ops;
235         disk->queue = xsegbd_dev->blk_queue;
236         disk->private_data = xsegbd_dev;
237         disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
238         snprintf(disk->disk_name, 32, "xsegbd%u", xsegbd_dev->id);
239
240         ret = 0;
241         
242         /* allow a non-zero sector_size parameter to override the disk size */
243         if (sector_size)
244                 xsegbd_dev->sectors = sector_size;
245         else {
246                 ret = xsegbd_get_size(xsegbd_dev);
247                 if (ret)
248                         goto out;
249         }
250
251         set_capacity(disk, xsegbd_dev->sectors);
252         XSEGLOG("xsegbd active...");
253         add_disk(disk); /* immediately activates the device */
254
255         return 0;
256
257 out:
258         return ret;
259 }
260
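/*
 * release callback of the embedded struct device: tear down the gendisk
 * and request queue, return the xseg requests, and free everything the
 * add path allocated.
 */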
261 static void xsegbd_dev_release(struct device *dev)
262 {
263         struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
264
265         /* cleanup gendisk and blk_queue the right way */
266         if (xsegbd_dev->gd) {
267                 if (xsegbd_dev->gd->flags & GENHD_FL_UP)
268                         del_gendisk(xsegbd_dev->gd);
269
270                 blk_cleanup_queue(xsegbd_dev->blk_queue);
271                 put_disk(xsegbd_dev->gd);
272         }
273
274         /* xsegbd does not actually need to wait on this port.
275          * We call xseg_cancel_wait here for clarity; with the
276          * xseg_segdev kernel driver it should amount to a
277          * no-op.
278          */
279         xseg_cancel_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
280
281         if (xseg_free_requests(xsegbd_dev->xseg, 
282                         xsegbd_dev->src_portno, xsegbd_dev->nr_requests) < 0)
283                 XSEGLOG("Error trying to free requests!\n");
284
285
286         unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
287
288         spin_lock(&xsegbd_devices_lock);
289         BUG_ON(xsegbd_devices[xsegbd_dev->src_portno] != xsegbd_dev);
290         xsegbd_devices[xsegbd_dev->src_portno] = NULL;
291         spin_unlock(&xsegbd_devices_lock);
292
293         if (xsegbd_dev->blk_req_pending)
294                 kfree(xsegbd_dev->blk_req_pending);
295         xq_free(&xsegbd_dev->blk_queue_pending);
296
297         kfree(xsegbd_dev);
298
299         module_put(THIS_MODULE);
300 }
301
302 /* ******************* */
303 /* ** Critical Path ** */
304 /* ******************* */
305
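/* copy data between the bio pages of a block request and the xseg request buffer */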
306 static void blk_to_xseg(struct xseg *xseg, struct xseg_request *xreq,
307                         struct request *blkreq)
308 {
309         struct bio_vec *bvec;
310         struct req_iterator iter;
311         uint64_t off = 0;
312         char *data = xseg_get_data(xseg, xreq);
313         rq_for_each_segment(bvec, blkreq, iter) {
314                 char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
315                 memcpy(data + off, bdata, bvec->bv_len);
316                 off += bvec->bv_len;
317                 kunmap_atomic(bdata);
318         }
319 }
320
321 static void xseg_to_blk(struct xseg *xseg, struct xseg_request *xreq,
322                         struct request *blkreq)
323 {
324         struct bio_vec *bvec;
325         struct req_iterator iter;
326         uint64_t off = 0;
327         char *data = xseg_get_data(xseg, xreq);
328         rq_for_each_segment(bvec, blkreq, iter) {
329                 char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
330                 memcpy(bdata, data + off, bvec->bv_len);
331                 off += bvec->bv_len;
332                 kunmap_atomic(bdata);
333         }
334 }
335
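/*
 * Request function, called with the queue lock held. For every block
 * request we grab an xseg request and a free pending slot, fill in the
 * target, size, offset and flags, copy the data in for writes, and
 * submit to the destination port. When any of these resources runs out
 * we stop and put back whatever was grabbed last.
 */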
336 static void xseg_request_fn(struct request_queue *rq)
337 {
338         struct xseg_request *xreq;
339         struct xsegbd_device *xsegbd_dev = rq->queuedata;
340         struct request *blkreq;
341         struct xsegbd_pending *pending;
342         xqindex blkreq_idx;
343         char *target;
344         uint64_t datalen;
345         xport p;
346         int r;
347
348         for (;;) {
349                 blkreq_idx = Noneidx;
350                 xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno, 
351                                 xsegbd_dev->dst_portno, X_ALLOC);
352                 if (!xreq)
353                         break;
354
355                 blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending, 
356                                                 xsegbd_dev->src_portno);
357                 if (blkreq_idx == Noneidx)
358                         break;
359
360                 blkreq = blk_fetch_request(rq);
361                 if (!blkreq)
362                         break;
363
364                 if (blkreq->cmd_type != REQ_TYPE_FS) {
365                         //FIXME: xreq and blkreq_idx are leaked here
366                         XSEGLOG("ignoring non-fs cmd_type: %u", blkreq->cmd_type);
367                         __blk_end_request_all(blkreq, 0);
368                         continue;
369                 }
370
371                 datalen = blk_rq_bytes(blkreq);
372                 r = xseg_prep_request(xsegbd_dev->xseg, xreq, 
373                                         xsegbd_dev->targetlen, datalen);
374                 if (r < 0) {
375                         XSEGLOG("couldn't prep request");
376                         __blk_end_request_err(blkreq, r);
377                         BUG_ON(1);
378                         break;
379                 }
380                 if (xreq->bufferlen - xsegbd_dev->targetlen < datalen) {
381                         XSEGLOG("malformed req buffers");
382                         __blk_end_request_err(blkreq, -EIO);
383                         BUG_ON(1);
384                         break;
385                 }
386
387                 target = xseg_get_target(xsegbd_dev->xseg, xreq);
388                 strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
389                 if (blkreq_idx >= xsegbd_dev->nr_requests) {
390                         XSEGLOG("blkreq_idx >= xsegbd_dev->nr_requests");
391                         BUG_ON(1);
392                         __blk_end_request_err(blkreq, -EIO);
393                         break;
394                 }
395                 pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
396                 pending->dev = xsegbd_dev;
397                 pending->request = blkreq;
398                 pending->comp = NULL;
399                 xreq->size = datalen;
400                 xreq->offset = blk_rq_pos(blkreq) << 9;
401                 /*
402                 if (xreq->offset >= (sector_size << 9))
403                         XSEGLOG("sector offset: %lu > %lu, flush:%u, fua:%u",
404                                  blk_rq_pos(blkreq), sector_size,
405                                  blkreq->cmd_flags & REQ_FLUSH,
406                                  blkreq->cmd_flags & REQ_FUA);
407                 */
408
409                 if (blkreq->cmd_flags & REQ_FLUSH)
410                         xreq->flags |= XF_FLUSH;
411
412                 if (blkreq->cmd_flags & REQ_FUA)
413                         xreq->flags |= XF_FUA;
414
415                 //XSEGLOG("xreq: %lx size: %llu offset: %llu, blkreq_idx: %llu", 
416                 //              xreq, xreq->size, xreq->offset, blkreq_idx);
417
418                 if (rq_data_dir(blkreq)) {
419                         /* unlock for data transfers? */
420                         blk_to_xseg(xsegbd_dev->xseg, xreq, blkreq);
421                         //XSEGLOG("xreq: %lx size: %llu offset: %llu, blkreq_idx: %llu completed blk_to_xseg", 
422                         //      xreq, xreq->size, xreq->offset, blkreq_idx);
423                         xreq->op = X_WRITE;
424                 } else {
425                         xreq->op = X_READ;
426                 }
427
428                 //maybe put this in loop start, and on break, 
429                 //just do xseg_get_req_data
430                 spin_lock(&xsegbd_dev->reqdatalock);
431                 r = xseg_set_req_data(xsegbd_dev->xseg, xreq, (void *) blkreq_idx);
432                 spin_unlock(&xsegbd_dev->reqdatalock);
433                 BUG_ON(r < 0);
434                 //XSEGLOG("xreq: %lx size: %llu offset: %llu, blkreq_idx: %llu set req data", 
435                 //              xreq, xreq->size, xreq->offset, blkreq_idx);
436
437                 p = xseg_submit(xsegbd_dev->xseg, xreq, 
438                                         xsegbd_dev->src_portno, X_ALLOC);
439                 if (p == NoPort) {
440                         //the req data is left set here
441                         XSEGLOG("couldn't submit req");
442                         BUG_ON(1);
443                         __blk_end_request_err(blkreq, -EIO);
444                         break;
445                 }
446                 //XSEGLOG("xreq: %lx size: %llu offset: %llu, blkreq_idx: %llu submitted", 
447                 //              xreq, xreq->size, xreq->offset, blkreq_idx);
448                 WARN_ON(xseg_signal(xsegbd_dev->xseg, p) < 0);
449         }
450         if (xreq)
451                 BUG_ON(xseg_put_request(xsegbd_dev->xseg, xreq,
452                                         xsegbd_dev->src_portno) == -1);
453         if (blkreq_idx != Noneidx)
454                 BUG_ON(xq_append_head(&xsegbd_dev->blk_queue_pending, 
455                                         blkreq_idx, xsegbd_dev->src_portno) == Noneidx);
456 }
457
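/* update the device size (in 512-byte sectors) from a served X_INFO reply */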
458 int update_dev_sectors_from_request(    struct xsegbd_device *xsegbd_dev,
459                                         struct xseg_request *xreq       )
460 {
461         void *data;
462
463         if (xreq->state & XS_FAILED)
464                 return -ENOENT;
465
466         if (!(xreq->state & XS_SERVED))
467                 return -EIO;
468
469         data = xseg_get_data(xsegbd_dev->xseg, xreq);
470         xsegbd_dev->sectors = *((uint64_t *) data) / 512ULL;
471         return 0;
472 }
473
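/*
 * Ask the peer for the target's size with a synchronous X_INFO request,
 * sleeping on a completion that xseg_callback() will fire.
 */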
474 static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev)
475 {
476         struct xseg_request *xreq;
477         char *target;
478         uint64_t datalen;
479         xqindex blkreq_idx;
480         struct xsegbd_pending *pending;
481         struct completion comp;
482         xport p;
483         void *data;
484         int ret = -EBUSY, r;
485         xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno,
486                         xsegbd_dev->dst_portno, X_ALLOC);
487         if (!xreq)
488                 return ret; /* don't jump to out:, which would put a NULL xreq */
489
490         datalen = sizeof(uint64_t);
491         BUG_ON(xseg_prep_request(xsegbd_dev->xseg, xreq, xsegbd_dev->targetlen, datalen));
492         BUG_ON(xreq->bufferlen - xsegbd_dev->targetlen < datalen);
493
494         init_completion(&comp);
495         blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending, 1);
496         if (blkreq_idx == Noneidx)
497                 goto out;
498         
499         pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
500         pending->dev = xsegbd_dev;
501         pending->request = NULL;
502         pending->comp = &comp;
503
504         
505         spin_lock(&xsegbd_dev->reqdatalock);
506         r = xseg_set_req_data(xsegbd_dev->xseg, xreq, (void *) blkreq_idx);
507         spin_unlock(&xsegbd_dev->reqdatalock);
508         if (r < 0)
509                 goto out_queue;
510         //XSEGLOG("for req: %lx, set data %llu (lx: %lx)", xreq, blkreq_idx, (void *) blkreq_idx);
511
512         target = xseg_get_target(xsegbd_dev->xseg, xreq);
513         strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
514         xreq->size = datalen;
515         xreq->offset = 0;
516
517         xreq->op = X_INFO;
518
519         /* waiting is not needed.
520          * but it should be better to use xseg_prepare_wait
521          * and the xseg_segdev kernel driver, would be a no op
522          */
523
524         xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
525         p = xseg_submit(xsegbd_dev->xseg, xreq, 
526                                 xsegbd_dev->src_portno, X_ALLOC);
527         /* submission failed; give the pending slot back and bail out */
528         if (p == NoPort) {
529                 goto out_data;
530         }
531         WARN_ON(xseg_signal(xsegbd_dev->xseg, p) < 0);
532
533         wait_for_completion_interruptible(&comp);
534         //XSEGLOG("Woken up after wait_for_completion_interruptible()\n");
535         ret = update_dev_sectors_from_request(xsegbd_dev, xreq);
536         //XSEGLOG("get_size: sectors = %ld\n", (long)xsegbd_dev->sectors);
537 out:
538         BUG_ON(xseg_put_request(xsegbd_dev->xseg, xreq, xsegbd_dev->src_portno) < 0);
539         return ret;
540
541 out_data:
542         spin_lock(&xsegbd_dev->reqdatalock);
543         r = xseg_get_req_data(xsegbd_dev->xseg, xreq, &data);
544         spin_unlock(&xsegbd_dev->reqdatalock);
545 out_queue:
546         xq_append_head(&xsegbd_dev->blk_queue_pending, blkreq_idx, 1);
547         
548         goto out;
549 }
550
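/*
 * Port notification callback. Receive every completed xseg request,
 * match it with its pending slot, wake up a synchronous waiter or end
 * the corresponding block request (copying data out for reads), give
 * the slot and the xseg request back, and finally kick the request
 * function again.
 */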
551 static void xseg_callback(struct xseg *xseg, xport portno)
552 {
553         struct xsegbd_device *xsegbd_dev;
554         struct xseg_request *xreq;
555         struct request *blkreq;
556         struct xsegbd_pending *pending;
557         unsigned long flags;
558         xqindex blkreq_idx, ridx;
559         int err;
560         void *data;
561
562         xsegbd_dev  = __xsegbd_get_dev(portno);
563         if (!xsegbd_dev) {
564                 WARN_ON(1);
565                 return;
566         }
567
568         for (;;) {
569                 xreq = xseg_receive(xsegbd_dev->xseg, portno);
570                 if (!xreq)
571                         break;
572
573                 spin_lock(&xsegbd_dev->reqdatalock);
574                 err = xseg_get_req_data(xsegbd_dev->xseg, xreq, &data); 
575                 spin_unlock(&xsegbd_dev->reqdatalock);
576                 //XSEGLOG("for req: %lx, got data %llu (lx %lx)", xreq, (xqindex) data, data);
577                 if (err < 0) {
578                         WARN_ON(1);
579                         //maybe put request?
580                         continue;
581                 }
582
583                 blkreq_idx = (xqindex) data;
584                 if (blkreq_idx >= xsegbd_dev->nr_requests) {
585                         WARN_ON(1);
586                         //maybe put request?
587                         continue;
588                 }
589
590                 pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
591                 if (pending->comp) {
592                         /* someone is blocking on this request
593                            and will handle it when we wake them up. */
594                         complete(pending->comp);
595                         /* the request is blocker's responsibility so
596                            we will not put_request(); */
597                         continue;
598                 }
599
600                 /* this is now treated as a block I/O request to end */
601                 blkreq = pending->request;
602                 pending->request = NULL;
603                 //xsegbd_dev = pending->dev;
604                 if (xsegbd_dev != pending->dev) {
605                         XSEGLOG("xsegbd_dev != pending->dev");
606                         BUG_ON(1);
607                         continue;
608                 }
609                 pending->dev = NULL;
610                 if (!blkreq){
611                         //FIXME
612                         XSEGLOG("blkreq does not exist");
613                         BUG_ON(1);
614                         continue;
615                 }
616
617                 err = -EIO;
618                 if (!(xreq->state & XS_SERVED))
619                         goto blk_end;
620
621                 if (xreq->serviced != blk_rq_bytes(blkreq))
622                         goto blk_end;
623
624                 /* unlock for data transfer? */
625                 if (!rq_data_dir(blkreq)){
626                         xseg_to_blk(xsegbd_dev->xseg, xreq, blkreq);
627                         //XSEGLOG("for req: %lx, completed xseg_to_blk", xreq);
628                 }       
629
630                 err = 0;
631 blk_end:
632                 blk_end_request_all(blkreq, err);
633                 //XSEGLOG("for req: %lx, completed", xreq);
634                 ridx = xq_append_head(&xsegbd_dev->blk_queue_pending, blkreq_idx, 1);
635                 if (ridx == Noneidx) {
636                         XSEGLOG("couldn't append blkreq_idx");
637                         WARN_ON(1);
638                 }
639
640                 err = xseg_put_request(xsegbd_dev->xseg, xreq, xsegbd_dev->src_portno);
641                 if (err < 0) {
642                         XSEGLOG("couldn't put req");
643                         BUG_ON(1);
644                 }
645         }
646
647         if (xsegbd_dev) {
648                 spin_lock_irqsave(&xsegbd_dev->rqlock, flags);
649                 xseg_request_fn(xsegbd_dev->blk_queue);
650                 spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
651         }
652 }
653
654
655 /* sysfs interface */
656
657 static struct bus_type xsegbd_bus_type = {
658         .name   = "xsegbd",
659 };
660
661 static ssize_t xsegbd_size_show(struct device *dev,
662                                         struct device_attribute *attr, char *buf)
663 {
664         struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
665
666         return sprintf(buf, "%llu\n", (unsigned long long) xsegbd_dev->sectors * 512ULL);
667 }
668
669 static ssize_t xsegbd_major_show(struct device *dev,
670                                         struct device_attribute *attr, char *buf)
671 {
672         struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
673
674         return sprintf(buf, "%d\n", xsegbd_dev->major);
675 }
676
677 static ssize_t xsegbd_srcport_show(struct device *dev,
678                                         struct device_attribute *attr, char *buf)
679 {
680         struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
681
682         return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->src_portno);
683 }
684
685 static ssize_t xsegbd_dstport_show(struct device *dev,
686                                         struct device_attribute *attr, char *buf)
687 {
688         struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
689
690         return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->dst_portno);
691 }
692
693 static ssize_t xsegbd_id_show(struct device *dev,
694                                         struct device_attribute *attr, char *buf)
695 {
696         struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
697
698         return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->id);
699 }
700
701 static ssize_t xsegbd_reqs_show(struct device *dev,
702                                         struct device_attribute *attr, char *buf)
703 {
704         struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
705
706         return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->nr_requests);
707 }
708
709 static ssize_t xsegbd_target_show(struct device *dev,
710                                         struct device_attribute *attr, char *buf)
711 {
712         struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
713
714         return sprintf(buf, "%s\n", xsegbd_dev->target);
715 }
716
717 static ssize_t xsegbd_image_refresh(struct device *dev,
718                                         struct device_attribute *attr,
719                                         const char *buf,
720                                         size_t size)
721 {
722         struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
723         int rc, ret = size;
724
725         mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
726
727         rc = xsegbd_get_size(xsegbd_dev);
728         if (rc < 0) {
729                 ret = rc;
730                 goto out;
731         }
732
733         set_capacity(xsegbd_dev->gd, xsegbd_dev->sectors);
734
735 out:
736         mutex_unlock(&xsegbd_mutex);
737         return ret;
738 }
739
740 static DEVICE_ATTR(size, S_IRUGO, xsegbd_size_show, NULL);
741 static DEVICE_ATTR(major, S_IRUGO, xsegbd_major_show, NULL);
742 static DEVICE_ATTR(srcport, S_IRUGO, xsegbd_srcport_show, NULL);
743 static DEVICE_ATTR(dstport, S_IRUGO, xsegbd_dstport_show, NULL);
744 static DEVICE_ATTR(id , S_IRUGO, xsegbd_id_show, NULL);
745 static DEVICE_ATTR(reqs , S_IRUGO, xsegbd_reqs_show, NULL);
746 static DEVICE_ATTR(target, S_IRUGO, xsegbd_target_show, NULL);
747 static DEVICE_ATTR(refresh , S_IWUSR, NULL, xsegbd_image_refresh);
748
749 static struct attribute *xsegbd_attrs[] = {
750         &dev_attr_size.attr,
751         &dev_attr_major.attr,
752         &dev_attr_srcport.attr,
753         &dev_attr_dstport.attr,
754         &dev_attr_id.attr,
755         &dev_attr_reqs.attr,
756         &dev_attr_target.attr,
757         &dev_attr_refresh.attr,
758         NULL
759 };
760
761 static struct attribute_group xsegbd_attr_group = {
762         .attrs = xsegbd_attrs,
763 };
764
765 static const struct attribute_group *xsegbd_attr_groups[] = {
766         &xsegbd_attr_group,
767         NULL
768 };
769
770 static void xsegbd_sysfs_dev_release(struct device *dev)
771 {
772 }
773
774 static struct device_type xsegbd_device_type = {
775         .name           = "xsegbd",
776         .groups         = xsegbd_attr_groups,
777         .release        = xsegbd_sysfs_dev_release,
778 };
779
780 static void xsegbd_root_dev_release(struct device *dev)
781 {
782 }
783
784 static struct device xsegbd_root_dev = {
785         .init_name      = "xsegbd",
786         .release        = xsegbd_root_dev_release,
787 };
788
789 static int xsegbd_bus_add_dev(struct xsegbd_device *xsegbd_dev)
790 {
791         int ret = -ENOMEM;
792         struct device *dev;
793
794         mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
795         dev = &xsegbd_dev->dev;
796
797         dev->bus = &xsegbd_bus_type;
798         dev->type = &xsegbd_device_type;
799         dev->parent = &xsegbd_root_dev;
800         dev->release = xsegbd_dev_release;
801         dev_set_name(dev, "%d", xsegbd_dev->id);
802
803         ret = device_register(dev);
804
805         mutex_unlock(&xsegbd_mutex);
806         return ret;
807 }
808
809 static void xsegbd_bus_del_dev(struct xsegbd_device *xsegbd_dev)
810 {
811         device_unregister(&xsegbd_dev->dev);
812 }
813
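/*
 * 'add' bus attribute: create an xsegbd device. The expected format is
 * "<target> <src_port>:<dst_port>:<nr_requests>"; for example, with a
 * hypothetical target name:
 *
 *   echo "myvolume 2:1:128" > /sys/bus/xsegbd/add
 *
 * The source port doubles as the device id and must not already be in
 * use by another xsegbd device.
 */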
814 static ssize_t xsegbd_add(struct bus_type *bus, const char *buf, size_t count)
815 {
816         struct xsegbd_device *xsegbd_dev;
817         struct xseg_port *port;
818         ssize_t ret = -ENOMEM;
819
820         if (!try_module_get(THIS_MODULE))
821                 return -ENODEV;
822
823         xsegbd_dev = kzalloc(sizeof(*xsegbd_dev), GFP_KERNEL);
824         if (!xsegbd_dev)
825                 goto out;
826
827         spin_lock_init(&xsegbd_dev->rqlock);
828         spin_lock_init(&xsegbd_dev->reqdatalock);
829         INIT_LIST_HEAD(&xsegbd_dev->node);
830
831         /* parse cmd */
832         if (sscanf(buf, "%" __stringify(XSEGBD_TARGET_NAMELEN) "s "
833                         "%d:%d:%d", xsegbd_dev->target, &xsegbd_dev->src_portno,
834                         &xsegbd_dev->dst_portno, &xsegbd_dev->nr_requests) < 3) {
835                 ret = -EINVAL;
836                 goto out_dev;
837         }
838         xsegbd_dev->targetlen = strlen(xsegbd_dev->target);
839
840         spin_lock(&xsegbd_devices_lock);
841         if (xsegbd_devices[xsegbd_dev->src_portno] != NULL) {
842                 ret = -EINVAL;
843                 goto out_unlock;
844         }
845         xsegbd_devices[xsegbd_dev->src_portno] = xsegbd_dev;
846         xsegbd_dev->id = xsegbd_dev->src_portno;
847         spin_unlock(&xsegbd_devices_lock);
848
849         XSEGLOG("registering block device major %d", major);
850         ret = register_blkdev(major, XSEGBD_NAME);
851         if (ret < 0) {
852                 XSEGLOG("cannot register block device!");
853                 ret = -EBUSY;
854                 goto out_delentry;
855         }
856         xsegbd_dev->major = ret;
857         XSEGLOG("registered block device major %d", xsegbd_dev->major);
858
859         ret = xsegbd_bus_add_dev(xsegbd_dev);
860         if (ret)
861                 goto out_blkdev;
862
863         if (!xq_alloc_seq(&xsegbd_dev->blk_queue_pending, 
864                                 xsegbd_dev->nr_requests,
865                                 xsegbd_dev->nr_requests))
866                 goto out_bus;
867
868         xsegbd_dev->blk_req_pending = kzalloc(
869                         xsegbd_dev->nr_requests * sizeof(struct xsegbd_pending),
870                                    GFP_KERNEL);
871         if (!xsegbd_dev->blk_req_pending)
872                 goto out_freeq;
873
874         
875         XSEGLOG("joining segment");
876         //FIXME: use the xsegbd module config for now
877         xsegbd_dev->xseg = xseg_join(   xsegbd.config.type,
878                                         xsegbd.config.name,
879                                         "segdev",
880                                         xseg_callback           );
881         if (!xsegbd_dev->xseg)
882                 goto out_freepending;
883         
884
885         XSEGLOG("binding to source port %u (destination %u)",
886                         xsegbd_dev->src_portno, xsegbd_dev->dst_portno);
887         port = xseg_bind_port(xsegbd_dev->xseg, xsegbd_dev->src_portno);
888         if (!port) {
889                 XSEGLOG("cannot bind to port");
890                 ret = -EFAULT;
891
892                 goto out_xseg;
893         }
894         //FIXME rollback here
895         BUG_ON(xsegbd_dev->src_portno != xseg_portno(xsegbd_dev->xseg, port));
896         
897         /* make sure we don't get any requests until we're ready to handle them */
898         xseg_cancel_wait(xsegbd_dev->xseg, xseg_portno(xsegbd_dev->xseg, port));
899
900         ret = xsegbd_dev_init(xsegbd_dev);
901         if (ret)
902                 goto out_xseg;
903
904         return count;
905
906 out_xseg:
907         xseg_leave(xsegbd_dev->xseg);
908         
909 out_freepending:
910         kfree(xsegbd_dev->blk_req_pending);
911
912 out_freeq:
913         xq_free(&xsegbd_dev->blk_queue_pending);
914
915 out_bus:
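        /*
         * device_unregister() ends up in xsegbd_dev_release(), which
         * unregisters the blkdev and frees the queues, the pending array
         * and the device itself, so we must return here rather than fall
         * through to the labels below.
         */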
916         xsegbd_bus_del_dev(xsegbd_dev);
917
918         return ret;
919
920 out_blkdev:
921         unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
922
923 out_delentry:
924         spin_lock(&xsegbd_devices_lock);
925         xsegbd_devices[xsegbd_dev->src_portno] = NULL;
926
927 out_unlock:
928         spin_unlock(&xsegbd_devices_lock);
929
930 out_dev:
931         kfree(xsegbd_dev);
932
933 out:
934         return ret;
935 }
936
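/*
 * 'remove' bus attribute: tear down the device with the given id (the
 * source port), e.g.
 *
 *   echo 2 > /sys/bus/xsegbd/remove
 */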
937 static ssize_t xsegbd_remove(struct bus_type *bus, const char *buf, size_t count)
938 {
939         struct xsegbd_device *xsegbd_dev = NULL;
940         int id, ret;
941         unsigned long ul_id;
942
943         ret = strict_strtoul(buf, 10, &ul_id);
944         if (ret)
945                 return ret;
946
947         id = (int) ul_id;
948         if (id != ul_id)
949                 return -EINVAL;
950
951         mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
952
953         ret = count;
954         xsegbd_dev = __xsegbd_get_dev(id);
955         if (!xsegbd_dev) {
956                 ret = -ENOENT;
957                 goto out_unlock;
958         }
959         xsegbd_bus_del_dev(xsegbd_dev);
960
961 out_unlock:
962         mutex_unlock(&xsegbd_mutex);
963         return ret;
964 }
965
966 static struct bus_attribute xsegbd_bus_attrs[] = {
967         __ATTR(add, S_IWUSR, NULL, xsegbd_add),
968         __ATTR(remove, S_IWUSR, NULL, xsegbd_remove),
969         __ATTR_NULL
970 };
971
972 static int xsegbd_sysfs_init(void)
973 {
974         int ret;
975
976         ret = device_register(&xsegbd_root_dev);
977         if (ret < 0)
978                 return ret;
979
980         xsegbd_bus_type.bus_attrs = xsegbd_bus_attrs;
981         ret = bus_register(&xsegbd_bus_type);
982         if (ret < 0)
983                 device_unregister(&xsegbd_root_dev);
984
985         return ret;
986 }
987
988 static void xsegbd_sysfs_cleanup(void)
989 {
990         bus_unregister(&xsegbd_bus_type);
991         device_unregister(&xsegbd_root_dev);
992 }
993
994 /* *************************** */
995 /* ** Module Initialization ** */
996 /* *************************** */
997
998 static int __init xsegbd_init(void)
999 {
1000         int ret = -ENOMEM;
1001         xsegbd_devices = kzalloc(max_dev * sizeof(struct xsegbd_device *), GFP_KERNEL);
1002         if (!xsegbd_devices)
1003                 goto out;
1004
1005         spin_lock_init(&xsegbd_devices_lock);
1006
1007         ret = -ENOSYS;
1008         ret = xsegbd_xseg_init();
1009         if (ret)
1010                 goto out_free;
1011
1012         ret = xsegbd_sysfs_init();
1013         if (ret)
1014                 goto out_xseg;
1015
1016         XSEGLOG("initialization complete");
1017
1018 out:
1019         return ret;
1020
1021 out_xseg:
1022         xsegbd_xseg_quit();
1023         
1024 out_free:
1025         kfree(xsegbd_devices);
1026
1027         goto out;
1028 }
1029
1030 static void __exit xsegbd_exit(void)
1031 {
1032         xsegbd_sysfs_cleanup();
1033         xsegbd_xseg_quit();
1034 }
1035
1036 module_init(xsegbd_init);
1037 module_exit(xsegbd_exit);
1038