root / aio.c @ 60e1b2a6
History | View | Annotate | Download (5.9 kB)
1 | a76bab49 | aliguori | /*
|
---|---|---|---|
2 | a76bab49 | aliguori | * QEMU aio implementation
|
3 | a76bab49 | aliguori | *
|
4 | a76bab49 | aliguori | * Copyright IBM, Corp. 2008
|
5 | a76bab49 | aliguori | *
|
6 | a76bab49 | aliguori | * Authors:
|
7 | a76bab49 | aliguori | * Anthony Liguori <aliguori@us.ibm.com>
|
8 | a76bab49 | aliguori | *
|
9 | a76bab49 | aliguori | * This work is licensed under the terms of the GNU GPL, version 2. See
|
10 | a76bab49 | aliguori | * the COPYING file in the top-level directory.
|
11 | a76bab49 | aliguori | *
|
12 | 6b620ca3 | Paolo Bonzini | * Contributions after 2012-01-13 are licensed under the terms of the
|
13 | 6b620ca3 | Paolo Bonzini | * GNU GPL, version 2 or (at your option) any later version.
|
14 | a76bab49 | aliguori | */
|
15 | a76bab49 | aliguori | |
16 | a76bab49 | aliguori | #include "qemu-common.h" |
17 | a76bab49 | aliguori | #include "block.h" |
18 | 72cf2d4f | Blue Swirl | #include "qemu-queue.h" |
19 | a76bab49 | aliguori | #include "qemu_socket.h" |
20 | a76bab49 | aliguori | |
typedef struct AioHandler AioHandler;

/* The list of registered AIO handlers */
static QLIST_HEAD(, AioHandler) aio_handlers;

/* This is a simple lock used to protect the aio_handlers list.  Specifically,
 * it's used to ensure that no callbacks are removed while we're walking and
 * dispatching callbacks.
 */
static int walking_handlers;

/* One registered file descriptor together with its AIO callbacks. */
struct AioHandler
{
    int fd;                             /* file descriptor being watched */
    IOHandler *io_read;                 /* dispatched when fd is readable */
    IOHandler *io_write;                /* dispatched when fd is writable */
    AioFlushHandler *io_flush;          /* returns 0 when no requests are
                                         * pending on fd (see qemu_aio_wait) */
    AioProcessQueue *io_process_queue;  /* drains queued completions; returns
                                         * nonzero when progress was made */
    int deleted;                        /* removal deferred while the list is
                                         * being walked (walking_handlers) */
    void *opaque;                       /* user data passed to all callbacks */
    QLIST_ENTRY(AioHandler) node;       /* linkage in aio_handlers */
};
43 | a76bab49 | aliguori | |
44 | a76bab49 | aliguori | static AioHandler *find_aio_handler(int fd) |
45 | a76bab49 | aliguori | { |
46 | a76bab49 | aliguori | AioHandler *node; |
47 | a76bab49 | aliguori | |
48 | 72cf2d4f | Blue Swirl | QLIST_FOREACH(node, &aio_handlers, node) { |
49 | a76bab49 | aliguori | if (node->fd == fd)
|
50 | 79d5ca56 | Alexander Graf | if (!node->deleted)
|
51 | 79d5ca56 | Alexander Graf | return node;
|
52 | a76bab49 | aliguori | } |
53 | a76bab49 | aliguori | |
54 | a76bab49 | aliguori | return NULL; |
55 | a76bab49 | aliguori | } |
56 | a76bab49 | aliguori | |
57 | a76bab49 | aliguori | int qemu_aio_set_fd_handler(int fd, |
58 | a76bab49 | aliguori | IOHandler *io_read, |
59 | a76bab49 | aliguori | IOHandler *io_write, |
60 | a76bab49 | aliguori | AioFlushHandler *io_flush, |
61 | 8febfa26 | Kevin Wolf | AioProcessQueue *io_process_queue, |
62 | a76bab49 | aliguori | void *opaque)
|
63 | a76bab49 | aliguori | { |
64 | a76bab49 | aliguori | AioHandler *node; |
65 | a76bab49 | aliguori | |
66 | a76bab49 | aliguori | node = find_aio_handler(fd); |
67 | a76bab49 | aliguori | |
68 | a76bab49 | aliguori | /* Are we deleting the fd handler? */
|
69 | a76bab49 | aliguori | if (!io_read && !io_write) {
|
70 | a76bab49 | aliguori | if (node) {
|
71 | a76bab49 | aliguori | /* If the lock is held, just mark the node as deleted */
|
72 | a76bab49 | aliguori | if (walking_handlers)
|
73 | a76bab49 | aliguori | node->deleted = 1;
|
74 | a76bab49 | aliguori | else {
|
75 | a76bab49 | aliguori | /* Otherwise, delete it for real. We can't just mark it as
|
76 | a76bab49 | aliguori | * deleted because deleted nodes are only cleaned up after
|
77 | a76bab49 | aliguori | * releasing the walking_handlers lock.
|
78 | a76bab49 | aliguori | */
|
79 | 72cf2d4f | Blue Swirl | QLIST_REMOVE(node, node); |
80 | 7267c094 | Anthony Liguori | g_free(node); |
81 | a76bab49 | aliguori | } |
82 | a76bab49 | aliguori | } |
83 | a76bab49 | aliguori | } else {
|
84 | a76bab49 | aliguori | if (node == NULL) { |
85 | a76bab49 | aliguori | /* Alloc and insert if it's not already there */
|
86 | 7267c094 | Anthony Liguori | node = g_malloc0(sizeof(AioHandler));
|
87 | a76bab49 | aliguori | node->fd = fd; |
88 | 72cf2d4f | Blue Swirl | QLIST_INSERT_HEAD(&aio_handlers, node, node); |
89 | a76bab49 | aliguori | } |
90 | a76bab49 | aliguori | /* Update handler with latest information */
|
91 | a76bab49 | aliguori | node->io_read = io_read; |
92 | a76bab49 | aliguori | node->io_write = io_write; |
93 | a76bab49 | aliguori | node->io_flush = io_flush; |
94 | 8febfa26 | Kevin Wolf | node->io_process_queue = io_process_queue; |
95 | a76bab49 | aliguori | node->opaque = opaque; |
96 | a76bab49 | aliguori | } |
97 | a76bab49 | aliguori | |
98 | a76bab49 | aliguori | qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);
|
99 | a76bab49 | aliguori | |
100 | a76bab49 | aliguori | return 0; |
101 | a76bab49 | aliguori | } |
102 | a76bab49 | aliguori | |
/*
 * Wait until no registered handler reports outstanding AIO requests.
 *
 * Repeatedly dispatches completions via qemu_aio_wait() and polls every
 * handler's io_flush callback; loops until all io_flush callbacks return
 * zero and qemu_bh_poll() reports no further bottom-half progress.
 */
void qemu_aio_flush(void)
{
    AioHandler *node;
    int ret;

    do {
        ret = 0;

        /*
         * If there are pending emulated aio start them now so flush
         * will be able to return 1.
         */
        qemu_aio_wait();

        QLIST_FOREACH(node, &aio_handlers, node) {
            if (node->io_flush) {
                /* io_flush returns nonzero while requests are still in
                 * flight for this fd (see the check in qemu_aio_wait). */
                ret |= node->io_flush(node->opaque);
            }
        }
    } while (qemu_bh_poll() || ret > 0);
}
124 | a76bab49 | aliguori | |
125 | 8febfa26 | Kevin Wolf | int qemu_aio_process_queue(void) |
126 | 8febfa26 | Kevin Wolf | { |
127 | 8febfa26 | Kevin Wolf | AioHandler *node; |
128 | 8febfa26 | Kevin Wolf | int ret = 0; |
129 | 8febfa26 | Kevin Wolf | |
130 | 8febfa26 | Kevin Wolf | walking_handlers = 1;
|
131 | 8febfa26 | Kevin Wolf | |
132 | 8febfa26 | Kevin Wolf | QLIST_FOREACH(node, &aio_handlers, node) { |
133 | 8febfa26 | Kevin Wolf | if (node->io_process_queue) {
|
134 | 8febfa26 | Kevin Wolf | if (node->io_process_queue(node->opaque)) {
|
135 | 8febfa26 | Kevin Wolf | ret = 1;
|
136 | 8febfa26 | Kevin Wolf | } |
137 | 8febfa26 | Kevin Wolf | } |
138 | 8febfa26 | Kevin Wolf | } |
139 | 8febfa26 | Kevin Wolf | |
140 | 8febfa26 | Kevin Wolf | walking_handlers = 0;
|
141 | 8febfa26 | Kevin Wolf | |
142 | 8febfa26 | Kevin Wolf | return ret;
|
143 | 8febfa26 | Kevin Wolf | } |
144 | 8febfa26 | Kevin Wolf | |
/*
 * Dispatch pending AIO completions, blocking in select() if necessary.
 *
 * First runs bottom halves and queued completions; if either makes
 * progress, returns immediately.  Otherwise select()s on every fd whose
 * io_flush callback reports outstanding requests and dispatches the
 * resulting read/write callbacks.  Returns without blocking if no
 * handler has work pending.
 */
void qemu_aio_wait(void)
{
    int ret;

    if (qemu_bh_poll())
        return;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Return afterwards to avoid waiting needlessly in select().
     */
    if (qemu_aio_process_queue())
        return;

    do {
        AioHandler *node;
        fd_set rdfds, wrfds;
        int max_fd = -1;

        /* Hold the walk "lock": concurrent qemu_aio_set_fd_handler calls
         * will only mark nodes deleted instead of freeing them. */
        walking_handlers = 1;

        FD_ZERO(&rdfds);
        FD_ZERO(&wrfds);

        /* fill fd sets */
        QLIST_FOREACH(node, &aio_handlers, node) {
            /* If there aren't pending AIO operations, don't invoke callbacks.
             * Otherwise, if there are no AIO requests, qemu_aio_wait() would
             * wait indefinitely.
             */
            if (node->io_flush && node->io_flush(node->opaque) == 0)
                continue;

            if (!node->deleted && node->io_read) {
                FD_SET(node->fd, &rdfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
            if (!node->deleted && node->io_write) {
                FD_SET(node->fd, &wrfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
        }

        walking_handlers = 0;

        /* No AIO operations? Get us out of here */
        if (max_fd == -1)
            break;

        /* wait until next event */
        ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);
        if (ret == -1 && errno == EINTR)
            continue;

        /* if we have any readable fds, dispatch event */
        if (ret > 0) {
            walking_handlers = 1;

            /* we have to walk very carefully in case
             * qemu_aio_set_fd_handler is called while we're walking */
            node = QLIST_FIRST(&aio_handlers);
            while (node) {
                AioHandler *tmp;

                if (!node->deleted &&
                    FD_ISSET(node->fd, &rdfds) &&
                    node->io_read) {
                    node->io_read(node->opaque);
                }
                if (!node->deleted &&
                    FD_ISSET(node->fd, &wrfds) &&
                    node->io_write) {
                    node->io_write(node->opaque);
                }

                /* Advance before possibly freeing the current node. */
                tmp = node;
                node = QLIST_NEXT(node, node);

                /* Deleted nodes may be reclaimed now that their callbacks
                 * for this iteration have run. */
                if (tmp->deleted) {
                    QLIST_REMOVE(tmp, node);
                    g_free(tmp);
                }
            }

            walking_handlers = 0;
        }
    } while (ret == 0);
}