root / aio.c @ 1e5b9d2f
History | View | Annotate | Download (5.7 kB)
/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include "qemu-common.h"
#include "block.h"
#include "qemu-queue.h"
#include "qemu_socket.h"

typedef struct AioHandler AioHandler;

/* The list of registered AIO handlers */
static QLIST_HEAD(, AioHandler) aio_handlers;

/* This is a simple lock used to protect the aio_handlers list.  Specifically,
 * it's used to ensure that no callbacks are removed while we're walking and
 * dispatching callbacks.
 *
 * It is a plain flag, not a mutex: nonzero while a walk of aio_handlers is
 * in progress, so removals are deferred (node->deleted) instead of freeing
 * a node out from under the walker.
 */
static int walking_handlers;
struct AioHandler
{
    int fd;                             /* file descriptor being monitored */
    IOHandler *io_read;                 /* called when fd is readable; may be NULL */
    IOHandler *io_write;                /* called when fd is writable; may be NULL */
    AioFlushHandler *io_flush;          /* returns nonzero while AIO is pending on
                                         * this fd (see qemu_aio_wait fill loop) */
    AioProcessQueue *io_process_queue;  /* drains queued completions; may be NULL */
    int deleted;                        /* nonzero: unregistered while a walk was in
                                         * progress; node is freed after the walk */
    void *opaque;                       /* caller context passed to all callbacks */
    QLIST_ENTRY(AioHandler) node;       /* linkage in the global aio_handlers list */
};
42 |
static AioHandler *find_aio_handler(int fd) |
43 |
{ |
44 |
AioHandler *node; |
45 |
|
46 |
QLIST_FOREACH(node, &aio_handlers, node) { |
47 |
if (node->fd == fd)
|
48 |
if (!node->deleted)
|
49 |
return node;
|
50 |
} |
51 |
|
52 |
return NULL; |
53 |
} |
54 |
|
55 |
int qemu_aio_set_fd_handler(int fd, |
56 |
IOHandler *io_read, |
57 |
IOHandler *io_write, |
58 |
AioFlushHandler *io_flush, |
59 |
AioProcessQueue *io_process_queue, |
60 |
void *opaque)
|
61 |
{ |
62 |
AioHandler *node; |
63 |
|
64 |
node = find_aio_handler(fd); |
65 |
|
66 |
/* Are we deleting the fd handler? */
|
67 |
if (!io_read && !io_write) {
|
68 |
if (node) {
|
69 |
/* If the lock is held, just mark the node as deleted */
|
70 |
if (walking_handlers)
|
71 |
node->deleted = 1;
|
72 |
else {
|
73 |
/* Otherwise, delete it for real. We can't just mark it as
|
74 |
* deleted because deleted nodes are only cleaned up after
|
75 |
* releasing the walking_handlers lock.
|
76 |
*/
|
77 |
QLIST_REMOVE(node, node); |
78 |
qemu_free(node); |
79 |
} |
80 |
} |
81 |
} else {
|
82 |
if (node == NULL) { |
83 |
/* Alloc and insert if it's not already there */
|
84 |
node = qemu_mallocz(sizeof(AioHandler));
|
85 |
node->fd = fd; |
86 |
QLIST_INSERT_HEAD(&aio_handlers, node, node); |
87 |
} |
88 |
/* Update handler with latest information */
|
89 |
node->io_read = io_read; |
90 |
node->io_write = io_write; |
91 |
node->io_flush = io_flush; |
92 |
node->io_process_queue = io_process_queue; |
93 |
node->opaque = opaque; |
94 |
} |
95 |
|
96 |
qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);
|
97 |
|
98 |
return 0; |
99 |
} |
100 |
|
101 |
void qemu_aio_flush(void) |
102 |
{ |
103 |
AioHandler *node; |
104 |
int ret;
|
105 |
|
106 |
do {
|
107 |
ret = 0;
|
108 |
|
109 |
/*
|
110 |
* If there are pending emulated aio start them now so flush
|
111 |
* will be able to return 1.
|
112 |
*/
|
113 |
qemu_aio_wait(); |
114 |
|
115 |
QLIST_FOREACH(node, &aio_handlers, node) { |
116 |
ret |= node->io_flush(node->opaque); |
117 |
} |
118 |
} while (qemu_bh_poll() || ret > 0); |
119 |
} |
120 |
|
121 |
int qemu_aio_process_queue(void) |
122 |
{ |
123 |
AioHandler *node; |
124 |
int ret = 0; |
125 |
|
126 |
walking_handlers = 1;
|
127 |
|
128 |
QLIST_FOREACH(node, &aio_handlers, node) { |
129 |
if (node->io_process_queue) {
|
130 |
if (node->io_process_queue(node->opaque)) {
|
131 |
ret = 1;
|
132 |
} |
133 |
} |
134 |
} |
135 |
|
136 |
walking_handlers = 0;
|
137 |
|
138 |
return ret;
|
139 |
} |
140 |
|
/* Run one iteration of the AIO event loop.
 *
 * Order of duties: (1) poll bottom halves; (2) drain queued completions;
 * (3) select() on every fd whose io_flush reports pending AIO and dispatch
 * read/write callbacks.  Returns as soon as any of the three did work, or
 * immediately when no handler has pending AIO (max_fd == -1).
 */
void qemu_aio_wait(void)
{
    int ret;

    if (qemu_bh_poll())
        return;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Return afterwards to avoid waiting needlessly in select().
     */
    if (qemu_aio_process_queue())
        return;

    do {
        AioHandler *node;
        fd_set rdfds, wrfds;
        int max_fd = -1;

        walking_handlers = 1;

        FD_ZERO(&rdfds);
        FD_ZERO(&wrfds);

        /* fill fd sets */
        QLIST_FOREACH(node, &aio_handlers, node) {
            /* If there aren't pending AIO operations, don't invoke callbacks.
             * Otherwise, if there are no AIO requests, qemu_aio_wait() would
             * wait indefinitely.
             *
             * NOTE(review): io_flush is invoked here even for nodes already
             * marked deleted (only the FD_SET below checks node->deleted);
             * confirm whether a deleted handler's io_flush is safe to call.
             */
            if (node->io_flush && node->io_flush(node->opaque) == 0)
                continue;

            if (!node->deleted && node->io_read) {
                FD_SET(node->fd, &rdfds);
                /* select() wants highest-numbered fd plus one */
                max_fd = MAX(max_fd, node->fd + 1);
            }
            if (!node->deleted && node->io_write) {
                FD_SET(node->fd, &wrfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
        }

        walking_handlers = 0;

        /* No AIO operations?  Get us out of here */
        if (max_fd == -1)
            break;

        /* wait until next event */
        ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);
        if (ret == -1 && errno == EINTR)
            continue;

        /* if we have any readable fds, dispatch event */
        if (ret > 0) {
            walking_handlers = 1;

            /* we have to walk very carefully in case
             * qemu_aio_set_fd_handler is called while we're walking:
             * a callback may mark nodes (including the current one)
             * deleted, so fetch the next pointer before reaping. */
            node = QLIST_FIRST(&aio_handlers);
            while (node) {
                AioHandler *tmp;

                if (!node->deleted &&
                    FD_ISSET(node->fd, &rdfds) &&
                    node->io_read) {
                    node->io_read(node->opaque);
                }
                if (!node->deleted &&
                    FD_ISSET(node->fd, &wrfds) &&
                    node->io_write) {
                    node->io_write(node->opaque);
                }

                /* advance first, then free the old node if it was
                 * unregistered during this walk */
                tmp = node;
                node = QLIST_NEXT(node, node);

                if (tmp->deleted) {
                    QLIST_REMOVE(tmp, node);
                    qemu_free(tmp);
                }
            }

            walking_handlers = 0;
        }
    } while (ret == 0);
}