/* xsegbd.c
 *
 * xsegbd: block device driver over an xseg shared-memory segment.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/bio.h>

#include "xsegdev.h"
#include "xsegbd.h"

#define XSEGBD_MINORS 1

MODULE_DESCRIPTION("xsegbd");
MODULE_AUTHOR("XSEG");
MODULE_LICENSE("GPL");

static long sector_size = 0;
static long blksize = 512;
static int major = 0;
static char name[XSEGBD_VOLUME_NAMELEN] = "xsegbd";
static char spec[256] = "xsegdev:xsegbd:4:512:64:1024:12";
static int src_portno = 0, dst_portno = 1, nr_requests = 128;

module_param(sector_size, long, 0644);
module_param(blksize, long, 0644);
module_param(major, int, 0644);
module_param(src_portno, int, 0644);
module_param(dst_portno, int, 0644);
module_param(nr_requests, int, 0644);
module_param_string(name, name, sizeof(name), 0644);
module_param_string(spec, spec, sizeof(spec), 0644);
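/* A minimal usage sketch (argument values are illustrative only):
 *
 *   insmod xsegbd.ko spec="xsegdev:xsegbd:4:512:64:1024:12" \
 *          src_portno=0 dst_portno=1 nr_requests=128
 *
 * A peer serving dst_portno must already be running, since the disk size
 * probe in xsegbd_get_size() blocks until the peer answers.
 */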

static volatile int count;
struct semaphore xsegbd_lock;
static struct xsegbd xsegbd;


/* ********************* */
/* ** XSEG Operations ** */
/* ********************* */

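/* Memory management hooks for the "xsegdev" segment type: struct xseg's
 * internal allocations are backed by the plain kernel allocators. */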
static void *xsegdev_malloc(uint64_t size)
{
	return kmalloc((size_t)size, GFP_KERNEL);
}

static void *xsegdev_realloc(void *mem, uint64_t size)
{
	return krealloc(mem, (size_t)size, GFP_KERNEL);
}

static void xsegdev_mfree(void *ptr)
{
	/* kfree() returns void; don't "return" its value */
	kfree(ptr);
}

static long xsegdev_allocate(const char *name, uint64_t size)
{
	int r;
	struct xsegdev *xsegdev = xsegdev_get(0);

	r = IS_ERR(xsegdev) ? PTR_ERR(xsegdev) : 0;
	if (r) {
		XSEGLOG("cannot acquire xsegdev");
		goto err;
	}

	if (xsegdev->segment) {
		XSEGLOG("destroying existing xsegdev segment");
		r = xsegdev_destroy_segment(xsegdev);
		if (r)
			goto err;
	}

	XSEGLOG("creating xsegdev segment size %llu", size);
	r = xsegdev_create_segment(xsegdev, size, 1);
	if (r)
		goto err;

	xsegdev->segsize = size;
	xsegdev_put(xsegdev);
	return 0;

err:
	return r;
}

static long xsegdev_deallocate(const char *name)
{
	struct xsegdev *xsegdev = xsegdev_get(0);
	int r = IS_ERR(xsegdev) ? PTR_ERR(xsegdev) : 0;
	if (r)
		return r;

	clear_bit(XSEGDEV_RESERVED, &xsegdev->flags);
	XSEGLOG("destroying segment");
	r = xsegdev_destroy_segment(xsegdev);
	if (r)
		XSEGLOG("   ...failed");
	xsegdev_put(xsegdev);
	return r;
}

static long xseg_callback(void *arg);

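/* "Mapping" the segment from within the kernel just registers our callback
 * on xsegdev unit 0 and returns the segment pointer directly; no page
 * remapping is needed since the segment already lives in kernel memory. */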
static void *xsegdev_map(const char *name, uint64_t size)
{
	struct xseg *xseg = NULL;
	struct xsegdev *dev = xsegdev_get(0);
	int r;
	r = IS_ERR(dev) ? PTR_ERR(dev) : 0;
	if (r)
		goto out;

	if (!dev->segment)
		goto put_out;

	if (size > dev->segsize)
		goto put_out;

	if (dev->callback) /* in use */
		goto put_out;

	dev->callback = xseg_callback;
	dev->callarg = &xsegbd;
	xseg = (void *)dev->segment;

put_out:
	xsegdev_put(dev);
out:
	return xseg;
}

static void xsegdev_unmap(void *ptr, uint64_t size)
{
	struct xsegdev *xsegdev = xsegdev_get(0);
	int r = IS_ERR(xsegdev) ? PTR_ERR(xsegdev) : 0;
	if (r)
		return;

	xsegdev->callarg = NULL;
	xsegdev->callback = NULL;
	xsegdev_put(xsegdev);
}

static struct xseg_type xseg_xsegdev = {
	/* xseg operations */
	{
		.malloc = xsegdev_malloc,
		.realloc = xsegdev_realloc,
		.mfree = xsegdev_mfree,
		.allocate = xsegdev_allocate,
		.deallocate = xsegdev_deallocate,
		.map = xsegdev_map,
		.unmap = xsegdev_unmap
	},
	/* name */
	"xsegdev"
};

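/* Peer operations for userspace ("posix") peers. From the kernel side only
 * signal() is meaningful: it kicks the peer with SIGIO, using the pid the
 * peer stored in port->waitcue. The wait-side hooks are no-ops because the
 * kernel never sleeps on a port here, and the allocators are stubs. */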
static int posix_signal_init(void)
{
	return 0;
}

static void posix_signal_quit(void) { }

static int posix_prepare_wait(struct xseg_port *port)
{
	return 0;
}

static int posix_cancel_wait(struct xseg_port *port)
{
	return 0;
}

static int posix_wait_signal(struct xseg_port *port, uint32_t timeout)
{
	return 0;
}

static int posix_signal(struct xseg_port *port)
{
	struct pid *pid;
	struct task_struct *task;
	int ret = -ENOENT;

	rcu_read_lock();
	pid = find_vpid((pid_t)port->waitcue);
	if (!pid)
		goto out;
	task = pid_task(pid, PIDTYPE_PID);
	if (!task)
		goto out;

	ret = send_sig(SIGIO, task, 1);
out:
	rcu_read_unlock();
	return ret;
}

static void *posix_malloc(uint64_t size)
{
	return NULL;
}

static void *posix_realloc(void *mem, uint64_t size)
{
	return NULL;
}

static void posix_mfree(void *mem) { }

static struct xseg_peer xseg_peer_posix = {
	/* xseg signal operations */
	{
		.signal_init = posix_signal_init,
		.signal_quit = posix_signal_quit,
		.cancel_wait = posix_cancel_wait,
		.prepare_wait = posix_prepare_wait,
		.wait_signal = posix_wait_signal,
		.signal = posix_signal,
		.malloc = posix_malloc,
		.realloc = posix_realloc,
		.mfree = posix_mfree
	},
	/* name */
	"posix"
};

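/* Peer operations for in-kernel ("xsegdev") peers. Waiting and signalling
 * are unsupported (-1): kernel peers are driven by the xsegdev callback
 * instead of port signals. */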
static int xsegdev_signal_init(void)
{
	return 0;
}

static void xsegdev_signal_quit(void) { }

static int xsegdev_prepare_wait(struct xseg_port *port)
{
	return -1;
}

static int xsegdev_cancel_wait(struct xseg_port *port)
{
	return -1;
}

static int xsegdev_wait_signal(struct xseg_port *port, uint32_t timeout)
{
	return -1;
}

static int xsegdev_signal(struct xseg_port *port)
{
	return -1;
}

static struct xseg_peer xseg_peer_xsegdev = {
	/* xseg signal operations */
	{
		.signal_init = xsegdev_signal_init,
		.signal_quit = xsegdev_signal_quit,
		.cancel_wait = xsegdev_cancel_wait,
		.prepare_wait = xsegdev_prepare_wait,
		.wait_signal = xsegdev_wait_signal,
		.signal = xsegdev_signal,
		.malloc = xsegdev_malloc,
		.realloc = xsegdev_realloc,
		.mfree = xsegdev_mfree
	},
	/* name */
	"xsegdev"
};

/* ************************* */
/* ** XSEG Initialization ** */
/* ************************* */

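/* Bring up the xseg machinery: register the segment type and both peer
 * types, parse the spec string, create and join the segment, bind our
 * source port, and preallocate nr_requests requests on it. */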
int xsegbd_xseg_init(struct xsegbd *dev)
{
	struct xseg_port *xport;
	int r;

	if (!dev->name[0]) {
		strncpy(dev->name, name, XSEGBD_VOLUME_NAMELEN);
		/* strncpy() does not guarantee NUL termination */
		dev->name[XSEGBD_VOLUME_NAMELEN - 1] = '\0';
	}

	XSEGLOG("registering xseg types");
	dev->namesize = strlen(dev->name);
	r = xseg_register_type(&xseg_xsegdev);
	if (r)
		goto err0;

	r = xseg_register_peer(&xseg_peer_posix);
	if (r)
		goto err1;

	r = xseg_register_peer(&xseg_peer_xsegdev);
	if (r)
		goto err2;

	r = xseg_initialize("xsegdev");
	if (r) {
		XSEGLOG("cannot initialize 'xsegdev' peer");
		goto err3;
	}

	r = xseg_parse_spec(spec, &dev->config);
	if (r)
		goto err3;

	if (strncmp(dev->config.type, "xsegdev", 16))
		XSEGLOG("WARNING: unexpected segment type '%s' vs 'xsegdev'",
			 dev->config.type);

	XSEGLOG("creating segment");
	r = xseg_create(&dev->config);
	if (r) {
		XSEGLOG("cannot create segment");
		goto err3;
	}

	XSEGLOG("joining segment");
	dev->xseg = xseg_join("xsegdev", "xsegbd");
	if (!dev->xseg) {
		XSEGLOG("cannot join segment");
		r = -EFAULT;
		goto err3;
	}

	XSEGLOG("binding to source port %u (destination %u)",
		 src_portno, dst_portno);
	xport = xseg_bind_port(dev->xseg, src_portno);
	if (!xport) {
		XSEGLOG("cannot bind to port");
		dev->xseg = NULL;
		r = -EFAULT;
		goto err3;
	}
	dev->src_portno = xseg_portno(dev->xseg, xport);
	dev->dst_portno = dst_portno;

	if (nr_requests > dev->xseg->config.nr_requests)
		nr_requests = dev->xseg->config.nr_requests;

	if (xseg_alloc_requests(dev->xseg, src_portno, nr_requests)) {
		XSEGLOG("cannot allocate requests");
		dev->xseg = NULL;
		r = -EFAULT;
		goto err3;
	}

	return 0;
err3:
	xseg_unregister_peer(xseg_peer_xsegdev.name);
err2:
	xseg_unregister_peer(xseg_peer_posix.name);
err1:
	xseg_unregister_type(xseg_xsegdev.name);
err0:
	return r;
}

int xsegbd_xseg_quit(struct xsegbd *dev)
{
	/* make sure to unmap the segment first */
	dev->xseg->type.ops.unmap(dev->xseg, dev->xseg->segment_size);

	xseg_destroy(dev->xseg);
	dev->xseg = NULL;
	return 0;
}


/* ***************************** */
/* ** Block Device Operations ** */
/* ***************************** */

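/* open/release only maintain a usage count, serialized by xsegbd_lock. */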
static int xsegbd_open(struct block_device *bdev, fmode_t mode)
{
	int ret = down_interruptible(&xsegbd_lock);
	if (ret == 0) {
		count++;
		up(&xsegbd_lock);
	}
	return ret;
}

static int xsegbd_release(struct gendisk *gd, fmode_t mode)
{
	int ret = down_interruptible(&xsegbd_lock);
	if (ret == 0) {
		count--;
		up(&xsegbd_lock);
	}
	return ret;
}

static int xsegbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static const struct block_device_operations xsegbd_ops = {
	.owner		= THIS_MODULE,
	.open		= xsegbd_open,
	.release	= xsegbd_release,
	.ioctl		= xsegbd_ioctl
};


/* *************************** */
/* ** Device Initialization ** */
/* *************************** */

static void xseg_request_fn(struct request_queue *rq);

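/* Probe the volume size with a synchronous X_INFO request: the target name
 * goes in the request's name buffer, and the peer is expected to return the
 * size, as a 64-bit byte count, in the data buffer. */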
static loff_t xsegbd_get_size(struct xsegbd *dev)
{
	struct xseg_request *xreq;
	char *name, *data;
	uint64_t datasize;
	loff_t size;

	if ((xreq = xseg_get_request(dev->xseg, dev->src_portno))) {
		datasize = sizeof(loff_t);
		BUG_ON(xreq->buffersize - dev->namesize < datasize);
		BUG_ON(xseg_prep_request(xreq, dev->namesize, datasize));

		name = XSEG_TAKE_PTR(xreq->name, dev->xseg->segment);
		strncpy(name, dev->name, dev->namesize);
		xreq->size = datasize;
		xreq->offset = 0;

		xreq->op = X_INFO;

		BUG_ON(xseg_submit(dev->xseg, dev->dst_portno, xreq) == NoSerial);

		xseg_signal(dev->xseg, dev->dst_portno);
	}

	/* callback_fn doesn't handle X_INFO reqs atm and, more importantly, we
	 * cannot use an async operation to learn the disk size. Currently this
	 * behaves like a busy-wait loop and makes insmod block until a peer
	 * responds to our X_INFO req. This will change when the sysfs interface
	 * is implemented to handle disk operations.
	 */
	while (!(xreq = xseg_receive(dev->xseg, dev->src_portno)))
		cpu_relax();

	while (!(xreq->state & XS_SERVED))
		cpu_relax();

	data = XSEG_TAKE_PTR(xreq->data, dev->xseg->segment);
	/* TODO: make sure we use consistent types across peers */
	size = *((loff_t *) data);

	xseg_put_request(dev->xseg, dev->src_portno, xreq);

	return size;
}

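/* Set up one block device: allocate and configure the request queue and
 * gendisk, bring up xseg, and allocate the ring of pending block requests
 * that maps xreq->priv indices back to struct request pointers. */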
static int xsegbd_dev_init(struct xsegbd *dev, int id, sector_t size)
{
	int ret = -ENOMEM;
	struct gendisk *disk;

	spin_lock_init(&dev->lock);

	dev->id = id;
	dev->blk_queue = blk_alloc_queue(GFP_KERNEL);
	if (!dev->blk_queue)
		goto out;

	blk_init_allocated_queue(dev->blk_queue, xseg_request_fn, &dev->lock);
	dev->blk_queue->queuedata = dev;

	blk_queue_flush(dev->blk_queue, REQ_FLUSH | REQ_FUA);
	blk_queue_logical_block_size(dev->blk_queue, 512);
	blk_queue_physical_block_size(dev->blk_queue, blksize);
	blk_queue_bounce_limit(dev->blk_queue, BLK_BOUNCE_ANY);
	/* we can handle any number of segments, but then parts of the request
	 * may be served far sooner than others, and we cannot complete them
	 * early (unless we handle their bios directly).
	 */
	blk_queue_max_segments(dev->blk_queue, 1);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, dev->blk_queue);

	/* vkoukis says we don't need partitions */
	dev->gd = disk = alloc_disk(1);
	if (!disk)
		goto out_free_queue;

	disk->major = major;
	disk->first_minor = id * XSEGBD_MINORS;
	disk->fops = &xsegbd_ops;
	disk->queue = dev->blk_queue;
	disk->private_data = dev;
	disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
	snprintf(disk->disk_name, 32, "xsegbd%c", 'a' + id);

	ret = xsegbd_xseg_init(dev);
	if (ret < 0)
		goto out_free_disk;

	/* xsegbd_xseg_init() returned 0; reset ret for the error paths below */
	ret = -ENOMEM;
	if (!xq_alloc_seq(&dev->blk_queue_pending, nr_requests, nr_requests))
		goto out_quit;

	dev->blk_req_pending = kmalloc(sizeof(struct request *) * nr_requests, GFP_KERNEL);
	if (!dev->blk_req_pending)
		goto out_free_pending;

	/* allow a non-zero sector_size parameter to override the disk size */
	dev->sectors = sector_size ? sector_size : xsegbd_get_size(dev) / 512ULL;
	set_capacity(disk, dev->sectors);

	add_disk(disk); /* immediately activates the device */
	ret = 0;

out:
	return ret;

out_free_pending:
	xq_free(&dev->blk_queue_pending);

out_quit:
	xsegbd_xseg_quit(dev);

out_free_disk:
	put_disk(disk);

out_free_queue:
	blk_cleanup_queue(dev->blk_queue);

	goto out;
}

static int xsegbd_dev_destroy(struct xsegbd *dev)
{
	xq_free(&dev->blk_queue_pending);
	kfree(dev->blk_req_pending);
	del_gendisk(dev->gd);
	put_disk(dev->gd);
	blk_cleanup_queue(dev->blk_queue);
	xsegbd_xseg_quit(dev);
	return 0;
}


/* *************************** */
/* ** Module Initialization ** */
/* *************************** */

static int __init xsegbd_init(void)
{
	int ret;

	sema_init(&xsegbd_lock, 1);

	XSEGLOG("registering block device major %d", major);
	ret = register_blkdev(major, XSEGBD_NAME);
	if (ret < 0) {
		XSEGLOG("cannot register block device!");
		ret = -EBUSY;
		goto out;
	}
	/* a positive return value is a dynamically allocated major;
	 * register_blkdev() returns 0 when the requested major was granted */
	if (ret > 0)
		major = ret;
	XSEGLOG("registered block device major %d", major);

	XSEGLOG("initializing device");
	ret = xsegbd_dev_init(&xsegbd, 0, sector_size);
	if (ret < 0) {
		XSEGLOG("cannot initialize device!");
		goto unregister;
	}

	XSEGLOG("initialization complete");
out:
	return ret;

unregister:
	unregister_blkdev(major, XSEGBD_NAME);
	goto out;
}

static void __exit xsegbd_exit(void)
{
	unregister_blkdev(major, XSEGBD_NAME);

	xseg_disable_driver(xsegbd.xseg, "posix");
	xseg_unregister_peer("posix");
	xseg_disable_driver(xsegbd.xseg, "xsegdev");
	xseg_unregister_peer("xsegdev");

	xsegbd_dev_destroy(&xsegbd);
	xseg_unregister_type("xsegdev");
}

module_init(xsegbd_init);
module_exit(xsegbd_exit);


/* ******************* */
/* ** Critical Path ** */
/* ******************* */

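/* Copy helpers between the block layer and the shared segment: walk every
 * bio_vec of the request with rq_for_each_segment() and kmap_atomic() each
 * page around the memcpy. */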
static void blk_to_xseg(struct xseg *xseg, struct xseg_request *xreq,
			struct request *blkreq)
{
	struct bio_vec *bvec;
	struct req_iterator iter;
	uint64_t off = 0;
	char *data = XSEG_TAKE_PTR(xreq->data, xseg->segment);
	rq_for_each_segment(bvec, blkreq, iter) {
		char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
		memcpy(data + off, bdata, bvec->bv_len);
		off += bvec->bv_len;
		kunmap_atomic(bdata);
	}
}

static void xseg_to_blk(struct xseg *xseg, struct xseg_request *xreq,
			struct request *blkreq)
{
	struct bio_vec *bvec;
	struct req_iterator iter;
	uint64_t off = 0;
	char *data = XSEG_TAKE_PTR(xreq->data, xseg->segment);
	rq_for_each_segment(bvec, blkreq, iter) {
		char *bdata = kmap_atomic(bvec->bv_page) + bvec->bv_offset;
		memcpy(bdata, data + off, bvec->bv_len);
		off += bvec->bv_len;
		kunmap_atomic(bdata);
	}
}

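/* Request function; the block layer calls it with dev->lock held. Each
 * struct request is translated into an xseg request, copied in for writes,
 * and submitted to the destination port. */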
static void xseg_request_fn(struct request_queue *rq)
{
	struct xseg_request *xreq;
	struct xsegbd *dev = rq->queuedata;
	struct request *blkreq;
	xqindex blkreq_idx;
	char *name;
	uint64_t datasize;

	for (;;) {
		xreq = xseg_get_request(dev->xseg, dev->src_portno);
		if (!xreq)
			break;

		/* complete non-fs requests right here; falling through and
		 * submitting them to the peer as well would be wrong */
		blkreq = blk_fetch_request(rq);
		while (blkreq && blkreq->cmd_type != REQ_TYPE_FS) {
			XSEGLOG("non-fs cmd_type: %u. *shrug*", blkreq->cmd_type);
			__blk_end_request_all(blkreq, 0);
			blkreq = blk_fetch_request(rq);
		}
		if (!blkreq)
			break;

		datasize = blk_rq_bytes(blkreq);
		BUG_ON(xreq->buffersize - dev->namesize < datasize);
		BUG_ON(xseg_prep_request(xreq, dev->namesize, datasize));

		name = XSEG_TAKE_PTR(xreq->name, dev->xseg->segment);
		strncpy(name, dev->name, dev->namesize);
		blkreq_idx = xq_pop_head(&dev->blk_queue_pending);
		BUG_ON(blkreq_idx == None);
		/* WARN_ON(dev->blk_req_pending[blkreq_idx]); */
		dev->blk_req_pending[blkreq_idx] = blkreq;
		xreq->priv = (void *)(unsigned long)blkreq_idx;
		xreq->size = datasize;
		xreq->offset = blk_rq_pos(blkreq) << 9;
		/*
		if (xreq->offset >= (sector_size << 9))
			XSEGLOG("sector offset: %lu > %lu, flush:%u, fua:%u",
				 blk_rq_pos(blkreq), sector_size,
				 blkreq->cmd_flags & REQ_FLUSH,
				 blkreq->cmd_flags & REQ_FUA);
		*/

		if (blkreq->cmd_flags & REQ_FLUSH)
			xreq->flags |= XF_FLUSH;

		if (blkreq->cmd_flags & REQ_FUA)
			xreq->flags |= XF_FUA;

		if (rq_data_dir(blkreq)) {
			/* unlock for data transfers? */
			blk_to_xseg(dev->xseg, xreq, blkreq);
			xreq->op = X_WRITE;
		} else {
			xreq->op = X_READ;
		}

		BUG_ON(xseg_submit(dev->xseg, dev->dst_portno, xreq) == NoSerial);
	}

	if (xreq)
		xseg_put_request(dev->xseg, dev->src_portno, xreq);
}

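/* Completion path, invoked through the xsegdev callback when a peer signals
 * us: drain received requests, complete the corresponding block requests,
 * recycle the pending index, and restart the request queue. */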
static long xseg_callback(void *arg)
{
	struct xsegbd *dev = arg;
	struct xseg_request *xreq;
	struct request *blkreq;
	unsigned long flags;
	xqindex blkreq_idx;
	int err;

	for (;;) {
		xreq = xseg_receive(dev->xseg, dev->src_portno);
		if (!xreq)
			break;

		/* we rely upon our peers to not have touched ->priv */
		blkreq_idx = (xqindex)(unsigned long)xreq->priv;
		/* xqindex is unsigned, so a single bound check suffices */
		if (blkreq_idx >= nr_requests) {
			XSEGLOG("invalid request index: %u! Ignoring.", blkreq_idx);
			goto xseg_put;
		}

		blkreq = dev->blk_req_pending[blkreq_idx];
		/* WARN_ON(!blkreq); */
		err = -EIO;

		if (!(xreq->state & XS_SERVED))
			goto blk_end;

		if (xreq->serviced != blk_rq_bytes(blkreq))
			goto blk_end;

		/* unlock for data transfer? */
		if (!rq_data_dir(blkreq))
			xseg_to_blk(dev->xseg, xreq, blkreq);

		err = 0;
blk_end:
		blk_end_request_all(blkreq, err);
		xq_append_head(&dev->blk_queue_pending, blkreq_idx);
xseg_put:
		xseg_put_request(dev->xseg, xreq->portno, xreq);
	}

	spin_lock_irqsave(&dev->lock, flags);
	xseg_request_fn(dev->blk_queue);
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}