fix xsegbd nr of peers problem
[archipelago] / xseg / peers / kernel / xsegbd.c
1 /* xsegbd.c
2  *
3  */
4
5 #include <linux/module.h>
6 #include <linux/moduleparam.h>
7 #include <linux/init.h>
8 #include <linux/sched.h>
9 #include <linux/kernel.h>
10 #include <linux/slab.h>
11 #include <linux/fs.h>
12 #include <linux/errno.h>
13 #include <linux/timer.h>
14 #include <linux/types.h>
15 #include <linux/vmalloc.h>
16 #include <linux/genhd.h>
17 #include <linux/blkdev.h>
18 #include <linux/bio.h>
19 #include <linux/device.h>
20 #include <linux/completion.h>
21 #include <linux/wait.h>
22 #include <sys/kernel/segdev.h>
23 #include "xsegbd.h"
24 #include <xseg/protocol.h>
25
26 #define XSEGBD_MINORS 1
27 /* define max request size to be used in xsegbd */
28 //FIXME should we make this 4MB instead of 256KB ?
29 #define XSEGBD_MAX_REQUEST_SIZE 262144U
30
31 MODULE_DESCRIPTION("xsegbd");
32 MODULE_AUTHOR("XSEG");
33 MODULE_LICENSE("GPL");
34
35 static long sector_size = 0;
36 static long blksize = 512;
37 static int major = 0;
38 static int max_dev = 1024;
39 static char name[XSEGBD_SEGMENT_NAMELEN] = "xsegbd";
40 static char spec[256] = "segdev:xsegbd:4:1024:12";
41
42 module_param(sector_size, long, 0644);
43 module_param(blksize, long, 0644);
44 module_param(max_dev, int, 0644);
45 module_param(major, int, 0644);
46 module_param_string(name, name, sizeof(name), 0644);
47 module_param_string(spec, spec, sizeof(spec), 0644);
48
49 static struct xsegbd xsegbd;
50 static struct xsegbd_device **xsegbd_devices; /* indexed by portno */
51 static DEFINE_MUTEX(xsegbd_mutex);
52 static DEFINE_SPINLOCK(xsegbd_devices_lock);
53
54
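/* Reference counting for in-flight users of an xsegbd_device: __xsegbd_put
 * wakes the release path waiting in xsegbd_dev_release once the count
 * drops to zero. */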
55 void __xsegbd_get(struct xsegbd_device *xsegbd_dev)
56 {
57         atomic_inc(&xsegbd_dev->usercount);
58 }
59
60 void __xsegbd_put(struct xsegbd_device *xsegbd_dev)
61 {
62         if (atomic_dec_and_test(&xsegbd_dev->usercount))
63                 wake_up(&xsegbd_dev->wq);
64 }
65
66 struct xsegbd_device *__xsegbd_get_dev(unsigned long id)
67 {
68         struct xsegbd_device *xsegbd_dev = NULL;
69
70         spin_lock(&xsegbd_devices_lock);
71         xsegbd_dev = xsegbd_devices[id];
72         if (xsegbd_dev)
73                 __xsegbd_get(xsegbd_dev);
74         spin_unlock(&xsegbd_devices_lock);
75
76         return xsegbd_dev;
77 }
78
79 /* ************************* */
80 /* ***** sysfs helpers ***** */
81 /* ************************* */
82
83 static struct xsegbd_device *dev_to_xsegbd(struct device *dev)
84 {
85         return container_of(dev, struct xsegbd_device, dev);
86 }
87
88 static struct device *xsegbd_get_dev(struct xsegbd_device *xsegbd_dev)
89 {
90         /* FIXME */
91         return get_device(&xsegbd_dev->dev);
92 }
93
94 static void xsegbd_put_dev(struct xsegbd_device *xsegbd_dev)
95 {
96         put_device(&xsegbd_dev->dev);
97 }
98
99 /* ************************* */
100 /* ** XSEG Initialization ** */
101 /* ************************* */
102
103 static void xseg_callback(uint32_t portno);
104
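/* Parse the 'spec' module parameter and join the shared xseg segment,
 * registering xseg_callback for the "segdev" peer type. */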
105 int xsegbd_xseg_init(void)
106 {
107         int r;
108
109         if (!xsegbd.name[0])
110                 strncpy(xsegbd.name, name, XSEGBD_SEGMENT_NAMELEN);
111
112         r = xseg_initialize();
113         if (r) {
114                 XSEGLOG("cannot initialize 'segdev' peer");
115                 goto err;
116         }
117
118         r = xseg_parse_spec(spec, &xsegbd.config);
119         if (r)
120                 goto err;
121
122         if (strncmp(xsegbd.config.type, "segdev", 16))
123                 XSEGLOG("WARNING: unexpected segment type '%s' vs 'segdev'",
124                          xsegbd.config.type);
125
126         /* leave it here for now */
127         XSEGLOG("joining segment");
128         xsegbd.xseg = xseg_join(        xsegbd.config.type,
129                                         xsegbd.config.name,
130                                         "segdev",
131                                         xseg_callback           );
132         if (!xsegbd.xseg) {
133                 XSEGLOG("cannot find segment");
134                 r = -ENODEV;
135                 goto err;
136         }
137
138         return 0;
139 err:
140         return r;
141
142 }
143
144 int xsegbd_xseg_quit(void)
145 {
146         struct segdev *segdev;
147
148         /* make sure to unmap the segment first */
149         segdev = segdev_get(0);
150         clear_bit(SEGDEV_RESERVED, &segdev->flags);
151         xsegbd.xseg->priv->segment_type.ops.unmap(xsegbd.xseg, xsegbd.xseg->segment_size);
152         segdev_put(segdev);
153
154         return 0;
155 }
156
157
158 /* ***************************** */
159 /* ** Block Device Operations ** */
160 /* ***************************** */
161
162 static int xsegbd_open(struct block_device *bdev, fmode_t mode)
163 {
164         struct gendisk *disk = bdev->bd_disk;
165         struct xsegbd_device *xsegbd_dev = disk->private_data;
166
167         xsegbd_get_dev(xsegbd_dev);
168
169         return 0;
170 }
171
172 static int xsegbd_release(struct gendisk *gd, fmode_t mode)
173 {
174         struct xsegbd_device *xsegbd_dev = gd->private_data;
175
176         xsegbd_put_dev(xsegbd_dev);
177
178         return 0;
179 }
180
181 static int xsegbd_ioctl(struct block_device *bdev, fmode_t mode,
182                         unsigned int cmd, unsigned long arg)
183 {
184         return -ENOTTY;
185 }
186
187 static const struct block_device_operations xsegbd_ops = {
188         .owner          = THIS_MODULE,
189         .open           = xsegbd_open,
190         .release        = xsegbd_release,
191         .ioctl          = xsegbd_ioctl 
192 };
193
194
195 /* *************************** */
196 /* ** Device Initialization ** */
197 /* *************************** */
198
199 static void xseg_request_fn(struct request_queue *rq);
200 static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev);
201 static int xsegbd_mapclose(struct xsegbd_device *xsegbd_dev);
202
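/* Set up the request queue and gendisk for a single xsegbd device and
 * activate it with add_disk(). */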
203 static int xsegbd_dev_init(struct xsegbd_device *xsegbd_dev)
204 {
205         int ret = -ENOMEM;
206         struct gendisk *disk;
207         unsigned int max_request_size_bytes;
208
209         spin_lock_init(&xsegbd_dev->rqlock);
210
211         xsegbd_dev->xsegbd = &xsegbd;
212
213         xsegbd_dev->blk_queue = blk_alloc_queue(GFP_KERNEL);
214         if (!xsegbd_dev->blk_queue)
215                 goto out;
216
217         if (!blk_init_allocated_queue(xsegbd_dev->blk_queue, 
218                         xseg_request_fn, &xsegbd_dev->rqlock))
219                 goto outqueue;
220
221         xsegbd_dev->blk_queue->queuedata = xsegbd_dev;
222
223         blk_queue_flush(xsegbd_dev->blk_queue, REQ_FLUSH | REQ_FUA);
224         blk_queue_logical_block_size(xsegbd_dev->blk_queue, 512);
225         blk_queue_physical_block_size(xsegbd_dev->blk_queue, blksize);
226         blk_queue_bounce_limit(xsegbd_dev->blk_queue, BLK_BOUNCE_ANY);
227         
228         //blk_queue_max_segments(dev->blk_queue, 512);
229
230         max_request_size_bytes = XSEGBD_MAX_REQUEST_SIZE;
231         blk_queue_max_hw_sectors(xsegbd_dev->blk_queue, max_request_size_bytes >> 9);
232         blk_queue_max_segment_size(xsegbd_dev->blk_queue, max_request_size_bytes);
233         blk_queue_io_min(xsegbd_dev->blk_queue, max_request_size_bytes);
234         blk_queue_io_opt(xsegbd_dev->blk_queue, max_request_size_bytes);
235
236         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xsegbd_dev->blk_queue);
237
238         /* vkoukis says we don't need partitions */
239         xsegbd_dev->gd = disk = alloc_disk(1);
240         if (!disk)
241                 goto outqueue;
242
243         disk->major = xsegbd_dev->major;
244         disk->first_minor = 0; // id * XSEGBD_MINORS;
245         disk->fops = &xsegbd_ops;
246         disk->queue = xsegbd_dev->blk_queue;
247         disk->private_data = xsegbd_dev;
248         disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
249         snprintf(disk->disk_name, 32, "xsegbd%u", xsegbd_dev->id);
250
251         ret = 0;
252         
253         /* allow a non-zero sector_size parameter to override the disk size */
254         if (sector_size)
255                 xsegbd_dev->sectors = sector_size;
256         else {
257                 ret = xsegbd_get_size(xsegbd_dev);
258                 if (ret)
259                         goto outdisk;
260         }
261
262         set_capacity(disk, xsegbd_dev->sectors);
263         XSEGLOG("xsegbd active...");
264         add_disk(disk); /* immediately activates the device */
265
266         return 0;
267
268
269 outdisk:
270         put_disk(xsegbd_dev->gd);
271 outqueue:
272         blk_cleanup_queue(xsegbd_dev->blk_queue);
273 out:
274         xsegbd_dev->blk_queue = NULL;
275         xsegbd_dev->gd = NULL;
276         return ret;
277 }
278
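/* Device release callback (installed in xsegbd_bus_add_dev): tear down the
 * gendisk and queue, close the target on the peer, and wait for all
 * in-flight requests to drain before freeing the device. */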
279 static void xsegbd_dev_release(struct device *dev)
280 {
281         int ret;
282         struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
283
284
285         /* cleanup gendisk and blk_queue the right way */
286         if (xsegbd_dev->gd) {
287                 if (xsegbd_dev->gd->flags & GENHD_FL_UP)
288                         del_gendisk(xsegbd_dev->gd);
289
290                 put_disk(xsegbd_dev->gd);
291                 xsegbd_mapclose(xsegbd_dev);
292         }
293         
294         spin_lock(&xsegbd_devices_lock);
295         BUG_ON(xsegbd_devices[xsegbd_dev->src_portno] != xsegbd_dev);
296         xsegbd_devices[xsegbd_dev->src_portno] = NULL;
297         spin_unlock(&xsegbd_devices_lock);
298         
299 //      xseg_cancel_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
300         xseg_quit_local_signal(xsegbd_dev->xseg, xsegbd_dev->src_portno);
301         /* wait for all pending operations on device to end */
302         wait_event(xsegbd_dev->wq, atomic_read(&xsegbd_dev->usercount) <= 0);
303         XSEGLOG("releasing id: %d", xsegbd_dev->id);
304         if (xsegbd_dev->blk_queue)
305                 blk_cleanup_queue(xsegbd_dev->blk_queue);
306
307
308 //      if (xseg_free_requests(xsegbd_dev->xseg, 
309 //                      xsegbd_dev->src_portno, xsegbd_dev->nr_requests) < 0)
310 //              XSEGLOG("Error trying to free requests!\n");
311
312
313         //FIXME xseg_leave to free_up resources ?
314         unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
315
316         if (xsegbd_dev->blk_req_pending)
317                 kfree(xsegbd_dev->blk_req_pending);
318         xq_free(&xsegbd_dev->blk_queue_pending);
319
320         kfree(xsegbd_dev);
321
322         module_put(THIS_MODULE);
323 }
324
325 /* ******************* */
326 /* ** Critical Path ** */
327 /* ******************* */
328
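/* Copy helpers between the bio pages of a block request and the data buffer
 * of an xseg request: blk_to_xseg for writes, xseg_to_blk for reads. */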
329 static void blk_to_xseg(struct xseg *xseg, struct xseg_request *xreq,
330                         struct request *blkreq)
331 {
332         struct bio_vec *bvec;
333         struct req_iterator iter;
334         uint64_t off = 0;
335         char *data = xseg_get_data(xseg, xreq);
336         rq_for_each_segment(bvec, blkreq, iter) {
337                 char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
338                 memcpy(data + off, bdata, bvec->bv_len);
339                 off += bvec->bv_len;
340                 kunmap_atomic(bdata);
341         }
342 }
343
344 static void xseg_to_blk(struct xseg *xseg, struct xseg_request *xreq,
345                         struct request *blkreq)
346 {
347         struct bio_vec *bvec;
348         struct req_iterator iter;
349         uint64_t off = 0;
350         char *data = xseg_get_data(xseg, xreq);
351         rq_for_each_segment(bvec, blkreq, iter) {
352                 char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
353                 memcpy(bdata, data + off, bvec->bv_len);
354                 off += bvec->bv_len;
355                 kunmap_atomic(bdata);
356         }
357 }
358
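/* Block layer request function: entered with rqlock held, pulls requests off
 * the queue, translates each fs request into an xseg request and submits it
 * to the destination port. Completions arrive asynchronously in xseg_callback. */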
359 static void xseg_request_fn(struct request_queue *rq)
360 {
361         struct xseg_request *xreq;
362         struct xsegbd_device *xsegbd_dev = rq->queuedata;
363         struct request *blkreq;
364         struct xsegbd_pending *pending;
365         xqindex blkreq_idx;
366         char *target;
367         uint64_t datalen;
368         xport p;
369         int r;
370         unsigned long flags;
371
372         __xsegbd_get(xsegbd_dev);
373
374         spin_unlock_irq(&xsegbd_dev->rqlock);
375         for (;;) {
376                 if (current_thread_info()->preempt_count || irqs_disabled()){
377                         XSEGLOG("Current thread preempt_count: %d, irqs_disabled(): %d ",
378                                         current_thread_info()->preempt_count, irqs_disabled());
379                 }
380                 //XSEGLOG("Priority: %d", current_thread_info()->task->prio);
381                 //XSEGLOG("Static priority: %d", current_thread_info()->task->static_prio);
382                 //XSEGLOG("Normal priority: %d", current_thread_info()->task->normal_prio);
383                 //XSEGLOG("Rt_priority: %u", current_thread_info()->task->rt_priority);
384                 blkreq_idx = Noneidx;
385                 xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno, 
386                                 xsegbd_dev->dst_portno, X_ALLOC);
387                 if (!xreq)
388                         break;
389
390                 blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending, 
391                                                 xsegbd_dev->src_portno);
392                 if (blkreq_idx == Noneidx)
393                         break;
394                 
395                 if (blkreq_idx >= xsegbd_dev->nr_requests) {
396                         XSEGLOG("blkreq_idx >= xsegbd_dev->nr_requests");
397                         BUG_ON(1);
398                         break;
399                 }
400
401                 
402                 spin_lock_irqsave(&xsegbd_dev->rqlock, flags);
403                 blkreq = blk_fetch_request(rq);
404                 if (!blkreq){
405                         spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
406                         break;
407                 }
408
409                 if (blkreq->cmd_type != REQ_TYPE_FS) {
410                         //we lose xreq here
411                         XSEGLOG("non-fs cmd_type: %u. *shrug*", blkreq->cmd_type);
412                         __blk_end_request_all(blkreq, 0);
413                         spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
414                         continue;
415                 }
416                 spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
417                 if (current_thread_info()->preempt_count || irqs_disabled()){
418                         XSEGLOG("Current thread preempt_count: %d, irqs_disabled(): %d ",
419                                         current_thread_info()->preempt_count, irqs_disabled());
420                 }
421
422                 datalen = blk_rq_bytes(blkreq);
423                 r = xseg_prep_request(xsegbd_dev->xseg, xreq, 
424                                         xsegbd_dev->targetlen, datalen);
425                 if (r < 0) {
426                         XSEGLOG("couldn't prep request");
427                         blk_end_request_err(blkreq, r);
428                         BUG_ON(1);
429                         break;
430                 }
431                 r = -ENOMEM;
432                 if (xreq->bufferlen - xsegbd_dev->targetlen < datalen){
433                         XSEGLOG("malformed req buffers");
434                         blk_end_request_err(blkreq, r);
435                         BUG_ON(1);
436                         break;
437                 }
438
439                 target = xseg_get_target(xsegbd_dev->xseg, xreq);
440                 strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
441
442                 pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
443                 pending->dev = xsegbd_dev;
444                 pending->request = blkreq;
445                 pending->comp = NULL;
446                 
447                 xreq->size = datalen;
448                 xreq->offset = blk_rq_pos(blkreq) << 9;
449                 xreq->priv = (uint64_t) blkreq_idx;
450
451                 /*
452                 if (xreq->offset >= (sector_size << 9))
453                         XSEGLOG("sector offset: %lu > %lu, flush:%u, fua:%u",
454                                  blk_rq_pos(blkreq), sector_size,
455                                  blkreq->cmd_flags & REQ_FLUSH,
456                                  blkreq->cmd_flags & REQ_FUA);
457                 */
458
459                 if (blkreq->cmd_flags & REQ_FLUSH)
460                         xreq->flags |= XF_FLUSH;
461
462                 if (blkreq->cmd_flags & REQ_FUA)
463                         xreq->flags |= XF_FUA;
464
465                 if (rq_data_dir(blkreq)) {
466                         /* unlock for data transfers? */
467                         blk_to_xseg(xsegbd_dev->xseg, xreq, blkreq);
468                         xreq->op = X_WRITE;
469                 } else {
470                         xreq->op = X_READ;
471                 }
472
473
474                 r = -EIO;
475                 /* xsegbd_get here. will be put on receive */
476                 __xsegbd_get(xsegbd_dev);
477                 p = xseg_submit(xsegbd_dev->xseg, xreq, 
478                                         xsegbd_dev->src_portno, X_ALLOC);
479                 if (p == NoPort) {
480                         XSEGLOG("couldn't submit req");
481                         WARN_ON(1);
482                         blk_end_request_err(blkreq, r);
483                         __xsegbd_put(xsegbd_dev);
484                         break;
485                 }
486                 WARN_ON(xseg_signal(xsegbd_dev->xsegbd->xseg, p) < 0);
487         }
488         if (xreq)
489                 BUG_ON(xseg_put_request(xsegbd_dev->xsegbd->xseg, xreq, 
490                                         xsegbd_dev->src_portno) == -1);
491         if (blkreq_idx != Noneidx)
492                 BUG_ON(xq_append_head(&xsegbd_dev->blk_queue_pending, 
493                                 blkreq_idx, xsegbd_dev->src_portno) == Noneidx);
494         spin_lock_irq(&xsegbd_dev->rqlock);
495         __xsegbd_put(xsegbd_dev);
496 }
497
498 int update_dev_sectors_from_request(    struct xsegbd_device *xsegbd_dev,
499                                         struct xseg_request *xreq       )
500 {
501         void *data;
502         if (!xreq) {
503                 XSEGLOG("Invalid xreq");
504                 return -EIO;
505         }
506
507         if (xreq->state & XS_FAILED)
508                 return -ENOENT;
509
510         if (!(xreq->state & XS_SERVED))
511                 return -EIO;
512
513         if (!xsegbd_dev) {
514                 XSEGLOG("Invalid xsegbd_dev");
515                 return -ENOENT;
516         }
517         data = xseg_get_data(xsegbd_dev->xseg, xreq);
518         if (!data) {
519                 XSEGLOG("Invalid req data");
520                 return -EIO;
521         }
522         xsegbd_dev->sectors = *((uint64_t *) data) / 512ULL;
523         return 0;
524 }
525
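/* Issue a synchronous X_INFO request to the peer and update the device size
 * (in 512-byte sectors) from the reply. */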
526 static int xsegbd_get_size(struct xsegbd_device *xsegbd_dev)
527 {
528         struct xseg_request *xreq;
529         char *target;
530         uint64_t datalen;
531         xqindex blkreq_idx;
532         struct xsegbd_pending *pending;
533         struct completion comp;
534         xport p;
535         void *data;
536         int ret = -EBUSY, r;
537
538         __xsegbd_get(xsegbd_dev);
539
540         xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno,
541                         xsegbd_dev->dst_portno, X_ALLOC);
542         if (!xreq)
543                 goto out;
544
545         BUG_ON(xseg_prep_request(xsegbd_dev->xseg, xreq, xsegbd_dev->targetlen, 
546                                 sizeof(struct xseg_reply_info)));
547
548         init_completion(&comp);
549         blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending, 1);
550         if (blkreq_idx == Noneidx)
551                 goto out_put;
552         
553         pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
554         pending->dev = xsegbd_dev;
555         pending->request = NULL;
556         pending->comp = &comp;
557
558         
559         xreq->priv = (uint64_t) blkreq_idx;
560
561         target = xseg_get_target(xsegbd_dev->xseg, xreq);
562         strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
563         xreq->size = xreq->datalen;
564         xreq->offset = 0;
565         xreq->op = X_INFO;
566
567         xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
568         p = xseg_submit(xsegbd_dev->xseg, xreq, 
569                                 xsegbd_dev->src_portno, X_ALLOC);
570         if ( p == NoPort) {
571                 XSEGLOG("couldn't submit request");
572                 BUG_ON(1);
573                 goto out_queue;
574         }
575         WARN_ON(xseg_signal(xsegbd_dev->xseg, p) < 0);
576         XSEGLOG("Before wait for completion, comp %lx [%llu]", (unsigned long) pending->comp, (unsigned long long) blkreq_idx);
577         wait_for_completion_interruptible(&comp);
578         XSEGLOG("Woken up after wait_for_completion_interruptible(), comp: %lx [%llu]", (unsigned long) pending->comp, (unsigned long long) blkreq_idx);
579         ret = update_dev_sectors_from_request(xsegbd_dev, xreq);
580         //XSEGLOG("get_size: sectors = %ld\n", (long)xsegbd_dev->sectors);
581 out_put:
582         BUG_ON(xseg_put_request(xsegbd_dev->xseg, xreq, xsegbd_dev->src_portno) == -1);
583 out:
584         __xsegbd_put(xsegbd_dev);
585         return ret;
586
587 out_queue:
588         pending->dev = NULL;
589         pending->comp = NULL;
590         xq_append_head(&xsegbd_dev->blk_queue_pending, blkreq_idx, 1);
591         
592         goto out;
593 }
594
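/* Issue a synchronous X_CLOSE request so the peer releases the target before
 * the device goes away. */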
595 static int xsegbd_mapclose(struct xsegbd_device *xsegbd_dev)
596 {
597         struct xseg_request *xreq;
598         char *target;
599         uint64_t datalen;
600         xqindex blkreq_idx;
601         struct xsegbd_pending *pending;
602         struct completion comp;
603         xport p;
604         void *data;
605         int ret = -EBUSY, r;
606
607         __xsegbd_get(xsegbd_dev);
608         xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno,
609                         xsegbd_dev->dst_portno, X_ALLOC);
610         if (!xreq)
611                 goto out;
612
613         BUG_ON(xseg_prep_request(xsegbd_dev->xseg, xreq, xsegbd_dev->targetlen, 0));
614
615         init_completion(&comp);
616         blkreq_idx = xq_pop_head(&xsegbd_dev->blk_queue_pending, 1);
617         if (blkreq_idx == Noneidx)
618                 goto out_put;
619         
620         pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
621         pending->dev = xsegbd_dev;
622         pending->request = NULL;
623         pending->comp = &comp;
624
625         
626         xreq->priv = (uint64_t) blkreq_idx;
627
628         target = xseg_get_target(xsegbd_dev->xseg, xreq);
629         strncpy(target, xsegbd_dev->target, xsegbd_dev->targetlen);
630         xreq->size = xreq->datalen;
631         xreq->offset = 0;
632         xreq->op = X_CLOSE;
633
634         xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
635         p = xseg_submit(xsegbd_dev->xseg, xreq, 
636                                 xsegbd_dev->src_portno, X_ALLOC);
637         if ( p == NoPort) {
638                 XSEGLOG("couldn't submit request");
639                 BUG_ON(1);
640                 goto out_queue;
641         }
642         WARN_ON(xseg_signal(xsegbd_dev->xseg, p) < 0);
643         wait_for_completion_interruptible(&comp);
644         ret = 0;
645         if (xreq->state & XS_FAILED)
646                 XSEGLOG("Couldn't close disk on mapper");
647 out_put:
648         BUG_ON(xseg_put_request(xsegbd_dev->xseg, xreq, xsegbd_dev->src_portno) == -1);
649 out:
650         __xsegbd_put(xsegbd_dev);
651         return ret;
652
653 out_queue:
654         pending->dev = NULL;
655         pending->comp = NULL;
656         xq_append_head(&xsegbd_dev->blk_queue_pending, blkreq_idx, 1);
657         
658         goto out;
659 }
660
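/* Callback invoked when responses arrive on our port: complete synchronous
 * waiters, end the corresponding block requests, recycle the pending slots,
 * and kick the request function again. */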
661 static void xseg_callback(xport portno)
662 {
663         struct xsegbd_device *xsegbd_dev;
664         struct xseg_request *xreq;
665         struct request *blkreq;
666         struct xsegbd_pending *pending;
667         unsigned long flags;
668         xqindex blkreq_idx, ridx;
669         int err;
670         void *data;
671
672         xsegbd_dev  = __xsegbd_get_dev(portno);
673         if (!xsegbd_dev) {
674                 XSEGLOG("portno: %u has no xsegbd device assigned", portno);
675                 WARN_ON(1);
676                 return;
677         }
678
679         for (;;) {
680                 xseg_prepare_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
681                 xreq = xseg_receive(xsegbd_dev->xseg, portno, 0);
682                 if (!xreq)
683                         break;
684
685 //              xseg_cancel_wait(xsegbd_dev->xseg, xsegbd_dev->src_portno);
686
687                 blkreq_idx = (xqindex) xreq->priv;
688                 if (blkreq_idx >= xsegbd_dev->nr_requests) {
689                         WARN_ON(1);
690                         //FIXME maybe put request?
691                         continue;
692                 }
693
694                 pending = &xsegbd_dev->blk_req_pending[blkreq_idx];
695                 if (pending->comp) {
696                         /* someone is blocking on this request
697                            and will handle it when we wake them up. */
698                         complete(pending->comp);
699                         /* the request is blocker's responsibility so
700                            we will not put_request(); */
701
702                         continue;
703                 }
704
705                 /* this is now treated as a block I/O request to end */
706                 blkreq = pending->request;
707                 pending->request = NULL;
708                 if (xsegbd_dev != pending->dev) {
709                         //FIXME maybe put request?
710                         XSEGLOG("xsegbd_dev != pending->dev");
711                         BUG_ON(1);
712                         continue;
713                 }
714                 pending->dev = NULL;
715                 if (!blkreq){
716                         //FIXME maybe put request?
717                         XSEGLOG("blkreq does not exist");
718                         BUG_ON(1);
719                         continue;
720                 }
721
722                 err = -EIO;
723                 if (!(xreq->state & XS_SERVED))
724                         goto blk_end;
725
726                 if (xreq->serviced != blk_rq_bytes(blkreq))
727                         goto blk_end;
728
729                 err = 0;
730                 if (!rq_data_dir(blkreq)){
731                         xseg_to_blk(xsegbd_dev->xseg, xreq, blkreq);
732                 }       
733 blk_end:
734                 blk_end_request_all(blkreq, err);
735                 
736                 ridx = xq_append_head(&xsegbd_dev->blk_queue_pending, 
737                                         blkreq_idx, xsegbd_dev->src_portno);
738                 if (ridx == Noneidx) {
739                         XSEGLOG("couldn't append blkreq_idx");
740                         WARN_ON(1);
741                 }
742
743                 if (xseg_put_request(xsegbd_dev->xseg, xreq, 
744                                                 xsegbd_dev->src_portno) < 0){
745                         XSEGLOG("couldn't put req");
746                         BUG_ON(1);
747                 }
748                 __xsegbd_put(xsegbd_dev);
749         }
750         if (xsegbd_dev) {
751                 spin_lock_irqsave(&xsegbd_dev->rqlock, flags);
752                 xseg_request_fn(xsegbd_dev->blk_queue);
753                 spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
754                 __xsegbd_put(xsegbd_dev);
755         }
756 }
757
758
759 /* sysfs interface */
760
761 static struct bus_type xsegbd_bus_type = {
762         .name   = "xsegbd",
763 };
764
765 static ssize_t xsegbd_size_show(struct device *dev,
766                                         struct device_attribute *attr, char *buf)
767 {
768         struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
769
770         return sprintf(buf, "%llu\n", (unsigned long long) xsegbd_dev->sectors * 512ULL);
771 }
772
773 static ssize_t xsegbd_major_show(struct device *dev,
774                                         struct device_attribute *attr, char *buf)
775 {
776         struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
777
778         return sprintf(buf, "%d\n", xsegbd_dev->major);
779 }
780
781 static ssize_t xsegbd_srcport_show(struct device *dev,
782                                         struct device_attribute *attr, char *buf)
783 {
784         struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
785
786         return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->src_portno);
787 }
788
789 static ssize_t xsegbd_dstport_show(struct device *dev,
790                                         struct device_attribute *attr, char *buf)
791 {
792         struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
793
794         return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->dst_portno);
795 }
796
797 static ssize_t xsegbd_id_show(struct device *dev,
798                                         struct device_attribute *attr, char *buf)
799 {
800         struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
801
802         return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->id);
803 }
804
805 static ssize_t xsegbd_reqs_show(struct device *dev,
806                                         struct device_attribute *attr, char *buf)
807 {
808         struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
809
810         return sprintf(buf, "%u\n", (unsigned) xsegbd_dev->nr_requests);
811 }
812
813 static ssize_t xsegbd_target_show(struct device *dev,
814                                         struct device_attribute *attr, char *buf)
815 {
816         struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
817
818         return sprintf(buf, "%s\n", xsegbd_dev->target);
819 }
820
821 static ssize_t xsegbd_image_refresh(struct device *dev,
822                                         struct device_attribute *attr,
823                                         const char *buf,
824                                         size_t size)
825 {
826         struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
827         int rc, ret = size;
828
829         mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
830
831         rc = xsegbd_get_size(xsegbd_dev);
832         if (rc < 0) {
833                 ret = rc;
834                 goto out;
835         }
836
837         set_capacity(xsegbd_dev->gd, xsegbd_dev->sectors);
838
839 out:
840         mutex_unlock(&xsegbd_mutex);
841         return ret;
842 }
843
844 //FIXME
845 static ssize_t xsegbd_cleanup(struct device *dev,
846                                         struct device_attribute *attr,
847                                         const char *buf,
848                                         size_t size)
849 {
850         struct xsegbd_device *xsegbd_dev = dev_to_xsegbd(dev);
851         int ret = size, i;
852         struct request *blkreq = NULL;
853         struct xsegbd_pending *pending = NULL;
854         struct completion *comp = NULL;
855
856         mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
857         xlock_acquire(&xsegbd_dev->blk_queue_pending.lock, 
858                                 xsegbd_dev->src_portno);
859         for (i = 0; i < xsegbd_dev->nr_requests; i++) {
860                 if (!__xq_check(&xsegbd_dev->blk_queue_pending, i)) {
861                         pending = &xsegbd_dev->blk_req_pending[i];
862                         blkreq = pending->request;
863                         pending->request = NULL;
864                         comp = pending->comp;
865                         pending->comp = NULL;
866                         if (blkreq){
867                                 XSEGLOG("Cleaning up blkreq %lx [%d]", (unsigned long) blkreq, i);
868                                 blk_end_request_all(blkreq, -EIO);
869                         }
870                         if (comp){
871                                 XSEGLOG("Cleaning up comp %lx [%d]", (unsigned long) comp, i);
872                                 complete(comp);
873                         }
874                         __xq_append_tail(&xsegbd_dev->blk_queue_pending, i);
875                 }
876         }
877         xlock_release(&xsegbd_dev->blk_queue_pending.lock);
878
879         mutex_unlock(&xsegbd_mutex);
880         return ret;
881 }
882
883 static DEVICE_ATTR(size, S_IRUGO, xsegbd_size_show, NULL);
884 static DEVICE_ATTR(major, S_IRUGO, xsegbd_major_show, NULL);
885 static DEVICE_ATTR(srcport, S_IRUGO, xsegbd_srcport_show, NULL);
886 static DEVICE_ATTR(dstport, S_IRUGO, xsegbd_dstport_show, NULL);
887 static DEVICE_ATTR(id , S_IRUGO, xsegbd_id_show, NULL);
888 static DEVICE_ATTR(reqs , S_IRUGO, xsegbd_reqs_show, NULL);
889 static DEVICE_ATTR(target, S_IRUGO, xsegbd_target_show, NULL);
890 static DEVICE_ATTR(refresh , S_IWUSR, NULL, xsegbd_image_refresh);
891 static DEVICE_ATTR(cleanup , S_IWUSR, NULL, xsegbd_cleanup);
892
893 static struct attribute *xsegbd_attrs[] = {
894         &dev_attr_size.attr,
895         &dev_attr_major.attr,
896         &dev_attr_srcport.attr,
897         &dev_attr_dstport.attr,
898         &dev_attr_id.attr,
899         &dev_attr_reqs.attr,
900         &dev_attr_target.attr,
901         &dev_attr_refresh.attr,
902         &dev_attr_cleanup.attr,
903         NULL
904 };
905
906 static struct attribute_group xsegbd_attr_group = {
907         .attrs = xsegbd_attrs,
908 };
909
910 static const struct attribute_group *xsegbd_attr_groups[] = {
911         &xsegbd_attr_group,
912         NULL
913 };
914
915 static void xsegbd_sysfs_dev_release(struct device *dev)
916 {
917 }
918
919 static struct device_type xsegbd_device_type = {
920         .name           = "xsegbd",
921         .groups         = xsegbd_attr_groups,
922         .release        = xsegbd_sysfs_dev_release,
923 };
924
925 static void xsegbd_root_dev_release(struct device *dev)
926 {
927 }
928
929 static struct device xsegbd_root_dev = {
930         .init_name      = "xsegbd",
931         .release        = xsegbd_root_dev_release,
932 };
933
934 static int xsegbd_bus_add_dev(struct xsegbd_device *xsegbd_dev)
935 {
936         int ret = -ENOMEM;
937         struct device *dev;
938
939         mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
940         dev = &xsegbd_dev->dev;
941
942         dev->bus = &xsegbd_bus_type;
943         dev->type = &xsegbd_device_type;
944         dev->parent = &xsegbd_root_dev;
945         dev->release = xsegbd_dev_release;
946         dev_set_name(dev, "%d", xsegbd_dev->id);
947
948         ret = device_register(dev);
949
950         mutex_unlock(&xsegbd_mutex);
951         return ret;
952 }
953
954 static void xsegbd_bus_del_dev(struct xsegbd_device *xsegbd_dev)
955 {
956         device_unregister(&xsegbd_dev->dev);
957 }
958
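/* sysfs 'add' handler: parses "<target> <src_port>:<dst_port>:<nr_requests>",
 * registers the block device, binds the source port and initializes the disk. */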
959 static ssize_t xsegbd_add(struct bus_type *bus, const char *buf, size_t count)
960 {
961         struct xsegbd_device *xsegbd_dev;
962         struct xseg_port *port;
963         ssize_t ret = -ENOMEM;
964
965         if (!try_module_get(THIS_MODULE))
966                 return -ENODEV;
967
968         xsegbd_dev = kzalloc(sizeof(*xsegbd_dev), GFP_KERNEL);
969         if (!xsegbd_dev)
970                 goto out;
971
972         spin_lock_init(&xsegbd_dev->rqlock);
973         INIT_LIST_HEAD(&xsegbd_dev->node);
974         init_waitqueue_head(&xsegbd_dev->wq);
975         atomic_set(&xsegbd_dev->usercount, 0);
976
977         /* parse cmd */
978         if (sscanf(buf, "%" __stringify(XSEGBD_TARGET_NAMELEN) "s "
979                         "%d:%d:%d", xsegbd_dev->target, &xsegbd_dev->src_portno,
980                         &xsegbd_dev->dst_portno, &xsegbd_dev->nr_requests) < 3) {
981                 ret = -EINVAL;
982                 goto out_dev;
983         }
984         xsegbd_dev->targetlen = strlen(xsegbd_dev->target);
985
986         spin_lock(&xsegbd_devices_lock);
987         if (xsegbd_devices[xsegbd_dev->src_portno] != NULL) {
988                 ret = -EINVAL;
989                 goto out_unlock;
990         }
991         xsegbd_devices[xsegbd_dev->src_portno] = xsegbd_dev;
992         xsegbd_dev->id = xsegbd_dev->src_portno;
993         spin_unlock(&xsegbd_devices_lock);
994
995         XSEGLOG("registering block device major %d", major);
996         ret = register_blkdev(major, XSEGBD_NAME);
997         if (ret < 0) {
998                 XSEGLOG("cannot register block device!");
999                 ret = -EBUSY;
1000                 goto out_delentry;
1001         }
1002         xsegbd_dev->major = ret;
1003         XSEGLOG("registered block device major %d", xsegbd_dev->major);
1004
1005         ret = xsegbd_bus_add_dev(xsegbd_dev);
1006         if (ret)
1007                 goto out_blkdev;
1008         ret = -ENOMEM; /* error paths below must not return 0 */
1009         if (!xq_alloc_seq(&xsegbd_dev->blk_queue_pending, 
1010                                 xsegbd_dev->nr_requests,
1011                                 xsegbd_dev->nr_requests))
1012                 goto out_bus;
1013
1014         xsegbd_dev->blk_req_pending = kzalloc(
1015                         xsegbd_dev->nr_requests *sizeof(struct xsegbd_pending),
1016                                    GFP_KERNEL);
1017         if (!xsegbd_dev->blk_req_pending)
1018                 goto out_freeq;
1019
1020         
1021         XSEGLOG("joining segment");
1022         //FIXME use xsegbd module config for now
1023         xsegbd_dev->xseg = xseg_join(   xsegbd.config.type,
1024                                         xsegbd.config.name,
1025                                         "segdev",
1026                                         xseg_callback           );
1027         if (!xsegbd_dev->xseg)
1028                 goto out_freepending;
1029         __sync_synchronize();
1030         
1031         XSEGLOG("%s binding to source port %u (destination %u)", xsegbd_dev->target,
1032                         xsegbd_dev->src_portno, xsegbd_dev->dst_portno);
1033         port = xseg_bind_port(xsegbd_dev->xseg, xsegbd_dev->src_portno, NULL);
1034         if (!port) {
1035                 XSEGLOG("cannot bind to port");
1036                 ret = -EFAULT;
1037
1038                 goto out_xseg;
1039         }
1040         
1041         if (xsegbd_dev->src_portno != xseg_portno(xsegbd_dev->xseg, port)) {
1042                 XSEGLOG("portno != xsegbd_dev->src_portno");
1043                 BUG_ON(1);
1044                 ret = -EFAULT;
1045                 goto out_xseg;
1046         }
1047         xseg_init_local_signal(xsegbd_dev->xseg, xsegbd_dev->src_portno);
1048
1049
1050         /* make sure we don't get any requests until we're ready to handle them */
1051         xseg_cancel_wait(xsegbd_dev->xseg, xseg_portno(xsegbd_dev->xseg, port));
1052
1053         ret = xsegbd_dev_init(xsegbd_dev);
1054         if (ret)
1055                 goto out_signal;
1056
1057         xseg_prepare_wait(xsegbd_dev->xseg, xseg_portno(xsegbd_dev->xseg, port));
1058         return count;
1059
1060 out_signal:
1061         xseg_quit_local_signal(xsegbd_dev->xseg, xsegbd_dev->src_portno);
1062 out_xseg:
1063         xseg_leave(xsegbd_dev->xseg);
1064         
1065 out_freepending:
1066         kfree(xsegbd_dev->blk_req_pending);
1067
1068 out_freeq:
1069         xq_free(&xsegbd_dev->blk_queue_pending);
1070
1071 out_bus:
1072         xsegbd_bus_del_dev(xsegbd_dev);
1073         return ret;
1074
1075 out_blkdev:
1076         unregister_blkdev(xsegbd_dev->major, XSEGBD_NAME);
1077
1078 out_delentry:
1079         spin_lock(&xsegbd_devices_lock);
1080         xsegbd_devices[xsegbd_dev->src_portno] = NULL;
1081
1082 out_unlock:
1083         spin_unlock(&xsegbd_devices_lock);
1084
1085 out_dev:
1086         kfree(xsegbd_dev);
1087
1088 out:
1089         return ret;
1090 }
1091
1092 static ssize_t xsegbd_remove(struct bus_type *bus, const char *buf, size_t count)
1093 {
1094         struct xsegbd_device *xsegbd_dev = NULL;
1095         int id, ret;
1096         unsigned long ul_id;
1097
1098         ret = strict_strtoul(buf, 10, &ul_id);
1099         if (ret)
1100                 return ret;
1101
1102         id = (int) ul_id;
1103         if (id != ul_id)
1104                 return -EINVAL;
1105
1106         mutex_lock_nested(&xsegbd_mutex, SINGLE_DEPTH_NESTING);
1107
1108         ret = count;
1109         xsegbd_dev = __xsegbd_get_dev(id);
1110         if (!xsegbd_dev) {
1111                 ret = -ENOENT;
1112                 goto out_unlock;
1113         }
1114         __xsegbd_put(xsegbd_dev);
1115         xsegbd_bus_del_dev(xsegbd_dev);
1116
1117 out_unlock:
1118         mutex_unlock(&xsegbd_mutex);
1119         return ret;
1120 }
1121
1122 static struct bus_attribute xsegbd_bus_attrs[] = {
1123         __ATTR(add, S_IWUSR, NULL, xsegbd_add),
1124         __ATTR(remove, S_IWUSR, NULL, xsegbd_remove),
1125         __ATTR_NULL
1126 };
1127
1128 static int xsegbd_sysfs_init(void)
1129 {
1130         int ret;
1131
1132         ret = device_register(&xsegbd_root_dev);
1133         if (ret < 0)
1134                 return ret;
1135
1136         xsegbd_bus_type.bus_attrs = xsegbd_bus_attrs;
1137         ret = bus_register(&xsegbd_bus_type);
1138         if (ret < 0)
1139                 device_unregister(&xsegbd_root_dev);
1140
1141         return ret;
1142 }
1143
1144 static void xsegbd_sysfs_cleanup(void)
1145 {
1146         bus_unregister(&xsegbd_bus_type);
1147         device_unregister(&xsegbd_root_dev);
1148 }
1149
1150 /* *************************** */
1151 /* ** Module Initialization ** */
1152 /* *************************** */
1153
1154 static int __init xsegbd_init(void)
1155 {
1156         int ret = -ENOMEM;
1157         xsegbd_devices = kzalloc(max_dev * sizeof(struct xsegbd_device *), GFP_KERNEL);
1158         if (!xsegbd_devices)
1159                 goto out;
1160
1161         spin_lock_init(&xsegbd_devices_lock);
1162
1163         ret = -ENOSYS;
1164         ret = xsegbd_xseg_init();
1165         if (ret)
1166                 goto out_free;
1167
1168         ret = xsegbd_sysfs_init();
1169         if (ret)
1170                 goto out_xseg;
1171
1172         XSEGLOG("initialization complete");
1173
1174 out:
1175         return ret;
1176
1177 out_xseg:
1178         xsegbd_xseg_quit();
1179         
1180 out_free:
1181         kfree(xsegbd_devices);
1182
1183         goto out;
1184 }
1185
1186 static void __exit xsegbd_exit(void)
1187 {
1188         xsegbd_sysfs_cleanup();
1189         xsegbd_xseg_quit();
1190 }
1191
1192 module_init(xsegbd_init);
1193 module_exit(xsegbd_exit);
1194