root / aio.c @ 148954fa
History | View | Annotate | Download (5.8 kB)
1 | a76bab49 | aliguori | /*
|
---|---|---|---|
2 | a76bab49 | aliguori | * QEMU aio implementation
|
3 | a76bab49 | aliguori | *
|
4 | a76bab49 | aliguori | * Copyright IBM, Corp. 2008
|
5 | a76bab49 | aliguori | *
|
6 | a76bab49 | aliguori | * Authors:
|
7 | a76bab49 | aliguori | * Anthony Liguori <aliguori@us.ibm.com>
|
8 | a76bab49 | aliguori | *
|
9 | a76bab49 | aliguori | * This work is licensed under the terms of the GNU GPL, version 2. See
|
10 | a76bab49 | aliguori | * the COPYING file in the top-level directory.
|
11 | a76bab49 | aliguori | *
|
12 | a76bab49 | aliguori | */
|
13 | a76bab49 | aliguori | |
14 | a76bab49 | aliguori | #include "qemu-common.h" |
15 | a76bab49 | aliguori | #include "block.h" |
16 | 72cf2d4f | Blue Swirl | #include "qemu-queue.h" |
17 | a76bab49 | aliguori | #include "qemu_socket.h" |
18 | a76bab49 | aliguori | |
19 | a76bab49 | aliguori | typedef struct AioHandler AioHandler; |
20 | a76bab49 | aliguori | |
/* The list of registered AIO handlers, one entry per watched fd. */
static QLIST_HEAD(, AioHandler) aio_handlers;

/* This is a simple lock used to protect the aio_handlers list.  Specifically,
 * it's used to ensure that no callbacks are removed while we're walking and
 * dispatching callbacks.  While it is non-zero, removals only mark the node
 * as deleted; the node is freed after the walk completes.
 */
static int walking_handlers;
29 | a76bab49 | aliguori | |
/* Per-fd bookkeeping for one registered AIO handler. */
struct AioHandler
{
    int fd;                            /* file descriptor being watched */
    IOHandler *io_read;                /* invoked when fd is readable (may be NULL) */
    IOHandler *io_write;               /* invoked when fd is writable (may be NULL) */
    AioFlushHandler *io_flush;         /* returns non-zero while requests are still
                                        * pending on this fd; used by qemu_aio_flush()
                                        * and to decide whether to select() on the fd */
    AioProcessQueue *io_process_queue; /* drains queued completions; returns non-zero
                                        * if it made progress (see
                                        * qemu_aio_process_queue()) */
    int deleted;                       /* marked for deferred removal while the list
                                        * is being walked (walking_handlers != 0) */
    void *opaque;                      /* user data passed to every callback above */
    QLIST_ENTRY(AioHandler) node;      /* link in the global aio_handlers list */
};
41 | a76bab49 | aliguori | |
42 | a76bab49 | aliguori | static AioHandler *find_aio_handler(int fd) |
43 | a76bab49 | aliguori | { |
44 | a76bab49 | aliguori | AioHandler *node; |
45 | a76bab49 | aliguori | |
46 | 72cf2d4f | Blue Swirl | QLIST_FOREACH(node, &aio_handlers, node) { |
47 | a76bab49 | aliguori | if (node->fd == fd)
|
48 | 79d5ca56 | Alexander Graf | if (!node->deleted)
|
49 | 79d5ca56 | Alexander Graf | return node;
|
50 | a76bab49 | aliguori | } |
51 | a76bab49 | aliguori | |
52 | a76bab49 | aliguori | return NULL; |
53 | a76bab49 | aliguori | } |
54 | a76bab49 | aliguori | |
55 | a76bab49 | aliguori | int qemu_aio_set_fd_handler(int fd, |
56 | a76bab49 | aliguori | IOHandler *io_read, |
57 | a76bab49 | aliguori | IOHandler *io_write, |
58 | a76bab49 | aliguori | AioFlushHandler *io_flush, |
59 | 8febfa26 | Kevin Wolf | AioProcessQueue *io_process_queue, |
60 | a76bab49 | aliguori | void *opaque)
|
61 | a76bab49 | aliguori | { |
62 | a76bab49 | aliguori | AioHandler *node; |
63 | a76bab49 | aliguori | |
64 | a76bab49 | aliguori | node = find_aio_handler(fd); |
65 | a76bab49 | aliguori | |
66 | a76bab49 | aliguori | /* Are we deleting the fd handler? */
|
67 | a76bab49 | aliguori | if (!io_read && !io_write) {
|
68 | a76bab49 | aliguori | if (node) {
|
69 | a76bab49 | aliguori | /* If the lock is held, just mark the node as deleted */
|
70 | a76bab49 | aliguori | if (walking_handlers)
|
71 | a76bab49 | aliguori | node->deleted = 1;
|
72 | a76bab49 | aliguori | else {
|
73 | a76bab49 | aliguori | /* Otherwise, delete it for real. We can't just mark it as
|
74 | a76bab49 | aliguori | * deleted because deleted nodes are only cleaned up after
|
75 | a76bab49 | aliguori | * releasing the walking_handlers lock.
|
76 | a76bab49 | aliguori | */
|
77 | 72cf2d4f | Blue Swirl | QLIST_REMOVE(node, node); |
78 | a76bab49 | aliguori | qemu_free(node); |
79 | a76bab49 | aliguori | } |
80 | a76bab49 | aliguori | } |
81 | a76bab49 | aliguori | } else {
|
82 | a76bab49 | aliguori | if (node == NULL) { |
83 | a76bab49 | aliguori | /* Alloc and insert if it's not already there */
|
84 | a76bab49 | aliguori | node = qemu_mallocz(sizeof(AioHandler));
|
85 | a76bab49 | aliguori | node->fd = fd; |
86 | 72cf2d4f | Blue Swirl | QLIST_INSERT_HEAD(&aio_handlers, node, node); |
87 | a76bab49 | aliguori | } |
88 | a76bab49 | aliguori | /* Update handler with latest information */
|
89 | a76bab49 | aliguori | node->io_read = io_read; |
90 | a76bab49 | aliguori | node->io_write = io_write; |
91 | a76bab49 | aliguori | node->io_flush = io_flush; |
92 | 8febfa26 | Kevin Wolf | node->io_process_queue = io_process_queue; |
93 | a76bab49 | aliguori | node->opaque = opaque; |
94 | a76bab49 | aliguori | } |
95 | a76bab49 | aliguori | |
96 | a76bab49 | aliguori | qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);
|
97 | a76bab49 | aliguori | |
98 | a76bab49 | aliguori | return 0; |
99 | a76bab49 | aliguori | } |
100 | a76bab49 | aliguori | |
101 | a76bab49 | aliguori | void qemu_aio_flush(void) |
102 | a76bab49 | aliguori | { |
103 | a76bab49 | aliguori | AioHandler *node; |
104 | a76bab49 | aliguori | int ret;
|
105 | a76bab49 | aliguori | |
106 | a76bab49 | aliguori | do {
|
107 | a76bab49 | aliguori | ret = 0;
|
108 | a76bab49 | aliguori | |
109 | 986c28d6 | Andrea Arcangeli | /*
|
110 | 986c28d6 | Andrea Arcangeli | * If there are pending emulated aio start them now so flush
|
111 | 986c28d6 | Andrea Arcangeli | * will be able to return 1.
|
112 | 986c28d6 | Andrea Arcangeli | */
|
113 | 986c28d6 | Andrea Arcangeli | qemu_aio_wait(); |
114 | 986c28d6 | Andrea Arcangeli | |
115 | 72cf2d4f | Blue Swirl | QLIST_FOREACH(node, &aio_handlers, node) { |
116 | c53a7285 | Avi Kivity | if (node->io_flush) {
|
117 | c53a7285 | Avi Kivity | ret |= node->io_flush(node->opaque); |
118 | c53a7285 | Avi Kivity | } |
119 | a76bab49 | aliguori | } |
120 | 6e5d97d0 | Nolan | } while (qemu_bh_poll() || ret > 0); |
121 | a76bab49 | aliguori | } |
122 | a76bab49 | aliguori | |
123 | 8febfa26 | Kevin Wolf | int qemu_aio_process_queue(void) |
124 | 8febfa26 | Kevin Wolf | { |
125 | 8febfa26 | Kevin Wolf | AioHandler *node; |
126 | 8febfa26 | Kevin Wolf | int ret = 0; |
127 | 8febfa26 | Kevin Wolf | |
128 | 8febfa26 | Kevin Wolf | walking_handlers = 1;
|
129 | 8febfa26 | Kevin Wolf | |
130 | 8febfa26 | Kevin Wolf | QLIST_FOREACH(node, &aio_handlers, node) { |
131 | 8febfa26 | Kevin Wolf | if (node->io_process_queue) {
|
132 | 8febfa26 | Kevin Wolf | if (node->io_process_queue(node->opaque)) {
|
133 | 8febfa26 | Kevin Wolf | ret = 1;
|
134 | 8febfa26 | Kevin Wolf | } |
135 | 8febfa26 | Kevin Wolf | } |
136 | 8febfa26 | Kevin Wolf | } |
137 | 8febfa26 | Kevin Wolf | |
138 | 8febfa26 | Kevin Wolf | walking_handlers = 0;
|
139 | 8febfa26 | Kevin Wolf | |
140 | 8febfa26 | Kevin Wolf | return ret;
|
141 | 8febfa26 | Kevin Wolf | } |
142 | 8febfa26 | Kevin Wolf | |
/* Dispatch one round of AIO work, blocking in select() if necessary.
 *
 * Fast paths: if a bottom half ran, or queued completions were processed,
 * return immediately without entering select().  Otherwise build fd sets
 * from handlers whose io_flush reports pending requests, select() on them,
 * and dispatch the ready read/write callbacks.  Returns once any event was
 * dispatched, or immediately when no handler has pending AIO.
 */
void qemu_aio_wait(void)
{
    int ret;

    if (qemu_bh_poll())
        return;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Return afterwards to avoid waiting needlessly in select().
     */
    if (qemu_aio_process_queue())
        return;

    do {
        AioHandler *node;
        fd_set rdfds, wrfds;
        int max_fd = -1;

        /* Defer handler removal while we walk the list below. */
        walking_handlers = 1;

        FD_ZERO(&rdfds);
        FD_ZERO(&wrfds);

        /* fill fd sets */
        QLIST_FOREACH(node, &aio_handlers, node) {
            /* If there aren't pending AIO operations, don't invoke callbacks.
             * Otherwise, if there are no AIO requests, qemu_aio_wait() would
             * wait indefinitely.
             */
            if (node->io_flush && node->io_flush(node->opaque) == 0)
                continue;

            if (!node->deleted && node->io_read) {
                FD_SET(node->fd, &rdfds);
                /* node->fd + 1: select()'s nfds is one past the highest fd */
                max_fd = MAX(max_fd, node->fd + 1);
            }
            if (!node->deleted && node->io_write) {
                FD_SET(node->fd, &wrfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
        }

        walking_handlers = 0;

        /* No AIO operations? Get us out of here */
        if (max_fd == -1)
            break;

        /* wait until next event */
        ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);
        if (ret == -1 && errno == EINTR)
            continue;

        /* if we have any readable fds, dispatch event */
        if (ret > 0) {
            walking_handlers = 1;

            /* we have to walk very carefully in case
             * qemu_aio_set_fd_handler is called while we're walking:
             * removals during a callback only set node->deleted, so we
             * advance past each node before reaping it below. */
            node = QLIST_FIRST(&aio_handlers);
            while (node) {
                AioHandler *tmp;

                if (!node->deleted &&
                    FD_ISSET(node->fd, &rdfds) &&
                    node->io_read) {
                    node->io_read(node->opaque);
                }
                if (!node->deleted &&
                    FD_ISSET(node->fd, &wrfds) &&
                    node->io_write) {
                    node->io_write(node->opaque);
                }

                /* Fetch the successor first so reaping tmp is safe. */
                tmp = node;
                node = QLIST_NEXT(node, node);

                if (tmp->deleted) {
                    QLIST_REMOVE(tmp, node);
                    qemu_free(tmp);
                }
            }

            walking_handlers = 0;
        }
    } while (ret == 0);
}