root / aio.c @ c48c6522
History | View | Annotate | Download (4.9 kB)
1 | a76bab49 | aliguori | /*
|
---|---|---|---|
2 | a76bab49 | aliguori | * QEMU aio implementation
|
3 | a76bab49 | aliguori | *
|
4 | a76bab49 | aliguori | * Copyright IBM, Corp. 2008
|
5 | a76bab49 | aliguori | *
|
6 | a76bab49 | aliguori | * Authors:
|
7 | a76bab49 | aliguori | * Anthony Liguori <aliguori@us.ibm.com>
|
8 | a76bab49 | aliguori | *
|
9 | a76bab49 | aliguori | * This work is licensed under the terms of the GNU GPL, version 2. See
|
10 | a76bab49 | aliguori | * the COPYING file in the top-level directory.
|
11 | a76bab49 | aliguori | *
|
12 | 6b620ca3 | Paolo Bonzini | * Contributions after 2012-01-13 are licensed under the terms of the
|
13 | 6b620ca3 | Paolo Bonzini | * GNU GPL, version 2 or (at your option) any later version.
|
14 | a76bab49 | aliguori | */
|
15 | a76bab49 | aliguori | |
16 | a76bab49 | aliguori | #include "qemu-common.h" |
17 | a76bab49 | aliguori | #include "block.h" |
18 | 72cf2d4f | Blue Swirl | #include "qemu-queue.h" |
19 | a76bab49 | aliguori | #include "qemu_socket.h" |
20 | a76bab49 | aliguori | |
typedef struct AioHandler AioHandler;

/* The list of registered AIO handlers */
static QLIST_HEAD(, AioHandler) aio_handlers;

/* This is a simple lock used to protect the aio_handlers list.  Specifically,
 * it's used to ensure that no callbacks are removed while we're walking and
 * dispatching callbacks.
 *
 * It is not a real mutex: qemu_aio_wait() sets it around each walk of
 * aio_handlers, and qemu_aio_set_fd_handler() checks it to decide whether a
 * node can be freed immediately or must only be marked ->deleted and reaped
 * later by the walker.
 */
static int walking_handlers;
31 | a76bab49 | aliguori | |
/* One registered file descriptor together with its callbacks. */
struct AioHandler
{
    int fd;                       /* file descriptor being monitored */
    IOHandler *io_read;           /* invoked when fd is readable */
    IOHandler *io_write;          /* invoked when fd is writable */
    AioFlushHandler *io_flush;    /* returns 0 when no requests are pending;
                                   * such handlers are skipped by the walker */
    int deleted;                  /* set instead of freeing the node when
                                   * removal happens during a walk; reaped
                                   * by qemu_aio_wait() afterwards */
    void *opaque;                 /* passed verbatim to all callbacks */
    QLIST_ENTRY(AioHandler) node; /* linkage in the aio_handlers list */
};
42 | a76bab49 | aliguori | |
43 | a76bab49 | aliguori | static AioHandler *find_aio_handler(int fd) |
44 | a76bab49 | aliguori | { |
45 | a76bab49 | aliguori | AioHandler *node; |
46 | a76bab49 | aliguori | |
47 | 72cf2d4f | Blue Swirl | QLIST_FOREACH(node, &aio_handlers, node) { |
48 | a76bab49 | aliguori | if (node->fd == fd)
|
49 | 79d5ca56 | Alexander Graf | if (!node->deleted)
|
50 | 79d5ca56 | Alexander Graf | return node;
|
51 | a76bab49 | aliguori | } |
52 | a76bab49 | aliguori | |
53 | a76bab49 | aliguori | return NULL; |
54 | a76bab49 | aliguori | } |
55 | a76bab49 | aliguori | |
56 | a76bab49 | aliguori | int qemu_aio_set_fd_handler(int fd, |
57 | a76bab49 | aliguori | IOHandler *io_read, |
58 | a76bab49 | aliguori | IOHandler *io_write, |
59 | a76bab49 | aliguori | AioFlushHandler *io_flush, |
60 | a76bab49 | aliguori | void *opaque)
|
61 | a76bab49 | aliguori | { |
62 | a76bab49 | aliguori | AioHandler *node; |
63 | a76bab49 | aliguori | |
64 | a76bab49 | aliguori | node = find_aio_handler(fd); |
65 | a76bab49 | aliguori | |
66 | a76bab49 | aliguori | /* Are we deleting the fd handler? */
|
67 | a76bab49 | aliguori | if (!io_read && !io_write) {
|
68 | a76bab49 | aliguori | if (node) {
|
69 | a76bab49 | aliguori | /* If the lock is held, just mark the node as deleted */
|
70 | a76bab49 | aliguori | if (walking_handlers)
|
71 | a76bab49 | aliguori | node->deleted = 1;
|
72 | a76bab49 | aliguori | else {
|
73 | a76bab49 | aliguori | /* Otherwise, delete it for real. We can't just mark it as
|
74 | a76bab49 | aliguori | * deleted because deleted nodes are only cleaned up after
|
75 | a76bab49 | aliguori | * releasing the walking_handlers lock.
|
76 | a76bab49 | aliguori | */
|
77 | 72cf2d4f | Blue Swirl | QLIST_REMOVE(node, node); |
78 | 7267c094 | Anthony Liguori | g_free(node); |
79 | a76bab49 | aliguori | } |
80 | a76bab49 | aliguori | } |
81 | a76bab49 | aliguori | } else {
|
82 | a76bab49 | aliguori | if (node == NULL) { |
83 | a76bab49 | aliguori | /* Alloc and insert if it's not already there */
|
84 | 7267c094 | Anthony Liguori | node = g_malloc0(sizeof(AioHandler));
|
85 | a76bab49 | aliguori | node->fd = fd; |
86 | 72cf2d4f | Blue Swirl | QLIST_INSERT_HEAD(&aio_handlers, node, node); |
87 | a76bab49 | aliguori | } |
88 | a76bab49 | aliguori | /* Update handler with latest information */
|
89 | a76bab49 | aliguori | node->io_read = io_read; |
90 | a76bab49 | aliguori | node->io_write = io_write; |
91 | a76bab49 | aliguori | node->io_flush = io_flush; |
92 | a76bab49 | aliguori | node->opaque = opaque; |
93 | a76bab49 | aliguori | } |
94 | a76bab49 | aliguori | |
95 | a76bab49 | aliguori | qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);
|
96 | a76bab49 | aliguori | |
97 | a76bab49 | aliguori | return 0; |
98 | a76bab49 | aliguori | } |
99 | a76bab49 | aliguori | |
/* Drain all pending AIO work: keep iterating qemu_aio_wait() until it
 * reports that no handler has outstanding requests. */
void qemu_aio_flush(void)
{
    while (qemu_aio_wait()) {
        /* keep going until nothing is left to dispatch */
    }
}
104 | a76bab49 | aliguori | |
/* Run one iteration of the AIO dispatch loop.
 *
 * First polls bottom halves; if any ran, returns immediately.  Otherwise
 * collects every handler whose io_flush() reports pending requests,
 * select()s on their fds, and dispatches the ready read/write callbacks.
 *
 * Returns true if progress was (or may have been) made — bottom halves ran
 * or at least one handler was busy; returns false when no handler has
 * pending requests, i.e. there is nothing to wait for.
 */
bool qemu_aio_wait(void)
{
    AioHandler *node;
    fd_set rdfds, wrfds;
    int max_fd = -1;
    int ret;
    bool busy;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for qemu_aio_wait loops).
     */
    if (qemu_bh_poll()) {
        return true;
    }

    /* Take the pseudo-lock: removals during this walk must be deferred. */
    walking_handlers = 1;

    FD_ZERO(&rdfds);
    FD_ZERO(&wrfds);

    /* fill fd sets */
    busy = false;
    QLIST_FOREACH(node, &aio_handlers, node) {
        /* If there aren't pending AIO operations, don't invoke callbacks.
         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
         * wait indefinitely.
         */
        if (node->io_flush) {
            if (node->io_flush(node->opaque) == 0) {
                continue;
            }
            busy = true;
        }
        /* Skip nodes marked deleted; they are reaped in the dispatch walk. */
        if (!node->deleted && node->io_read) {
            FD_SET(node->fd, &rdfds);
            /* max_fd accumulates fd + 1, i.e. the nfds value select() wants */
            max_fd = MAX(max_fd, node->fd + 1);
        }
        if (!node->deleted && node->io_write) {
            FD_SET(node->fd, &wrfds);
            max_fd = MAX(max_fd, node->fd + 1);
        }
    }

    walking_handlers = 0;

    /* No AIO operations?  Get us out of here */
    if (!busy) {
        return false;
    }

    /* wait until next event */
    ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        walking_handlers = 1;

        /* we have to walk very carefully in case
         * qemu_aio_set_fd_handler is called while we're walking */
        node = QLIST_FIRST(&aio_handlers);
        while (node) {
            AioHandler *tmp;

            if (!node->deleted &&
                FD_ISSET(node->fd, &rdfds) &&
                node->io_read) {
                node->io_read(node->opaque);
            }
            if (!node->deleted &&
                FD_ISSET(node->fd, &wrfds) &&
                node->io_write) {
                node->io_write(node->opaque);
            }

            /* Advance before possibly freeing the current node: a callback
             * above may have marked it deleted via qemu_aio_set_fd_handler. */
            tmp = node;
            node = QLIST_NEXT(node, node);

            if (tmp->deleted) {
                QLIST_REMOVE(tmp, node);
                g_free(tmp);
            }
        }

        walking_handlers = 0;
    }

    return true;
}