Statistics
| Branch: | Revision:

root / aio-posix.c @ 737e150e

History | View | Annotate | Download (7.3 kB)

/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
15 a76bab49 aliguori
16 a76bab49 aliguori
#include "qemu-common.h"
17 737e150e Paolo Bonzini
#include "block/block.h"
18 72cf2d4f Blue Swirl
#include "qemu-queue.h"
19 a76bab49 aliguori
#include "qemu_socket.h"
20 a76bab49 aliguori
21 a76bab49 aliguori
/*
 * One registered file descriptor within an AioContext.
 *
 * Nodes live on ctx->aio_handlers.  While the list is being walked
 * (ctx->walking_handlers != 0) nodes are never unlinked; they are only
 * flagged via 'deleted' and reaped once the walk finishes.
 */
struct AioHandler
{
    GPollFD pfd;                /* fd + event mask; also registered with the
                                 * AioContext GSource via g_source_add_poll */
    IOHandler *io_read;         /* invoked with 'opaque' when fd is readable */
    IOHandler *io_write;        /* invoked with 'opaque' when fd is writable */
    AioFlushHandler *io_flush;  /* returns 0 when no requests are outstanding;
                                 * such fds are skipped by aio_poll */
    int deleted;                /* deferred-removal flag (see above) */
    void *opaque;               /* argument passed to the callbacks */
    QLIST_ENTRY(AioHandler) node;  /* membership in ctx->aio_handlers */
};
31 a76bab49 aliguori
32 a915f4bc Paolo Bonzini
/*
 * Look up the live handler registered for 'fd' in 'ctx', or NULL.
 * Handlers already flagged 'deleted' are ignored so a pending removal
 * cannot be accidentally resurrected by aio_set_fd_handler.
 */
static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        /* Fold the original unbraced nested ifs into one braced
         * condition: same behavior, no dangling-else hazard. */
        if (node->pfd.fd == fd && !node->deleted) {
            return node;
        }
    }

    return NULL;
}
44 a76bab49 aliguori
45 a915f4bc Paolo Bonzini
void aio_set_fd_handler(AioContext *ctx,
46 a915f4bc Paolo Bonzini
                        int fd,
47 a915f4bc Paolo Bonzini
                        IOHandler *io_read,
48 a915f4bc Paolo Bonzini
                        IOHandler *io_write,
49 a915f4bc Paolo Bonzini
                        AioFlushHandler *io_flush,
50 a915f4bc Paolo Bonzini
                        void *opaque)
51 a76bab49 aliguori
{
52 a76bab49 aliguori
    AioHandler *node;
53 a76bab49 aliguori
54 a915f4bc Paolo Bonzini
    node = find_aio_handler(ctx, fd);
55 a76bab49 aliguori
56 a76bab49 aliguori
    /* Are we deleting the fd handler? */
57 a76bab49 aliguori
    if (!io_read && !io_write) {
58 a76bab49 aliguori
        if (node) {
59 e3713e00 Paolo Bonzini
            g_source_remove_poll(&ctx->source, &node->pfd);
60 e3713e00 Paolo Bonzini
61 a76bab49 aliguori
            /* If the lock is held, just mark the node as deleted */
62 cd9ba1eb Paolo Bonzini
            if (ctx->walking_handlers) {
63 a76bab49 aliguori
                node->deleted = 1;
64 cd9ba1eb Paolo Bonzini
                node->pfd.revents = 0;
65 cd9ba1eb Paolo Bonzini
            } else {
66 a76bab49 aliguori
                /* Otherwise, delete it for real.  We can't just mark it as
67 a76bab49 aliguori
                 * deleted because deleted nodes are only cleaned up after
68 a76bab49 aliguori
                 * releasing the walking_handlers lock.
69 a76bab49 aliguori
                 */
70 72cf2d4f Blue Swirl
                QLIST_REMOVE(node, node);
71 7267c094 Anthony Liguori
                g_free(node);
72 a76bab49 aliguori
            }
73 a76bab49 aliguori
        }
74 a76bab49 aliguori
    } else {
75 a76bab49 aliguori
        if (node == NULL) {
76 a76bab49 aliguori
            /* Alloc and insert if it's not already there */
77 7267c094 Anthony Liguori
            node = g_malloc0(sizeof(AioHandler));
78 cd9ba1eb Paolo Bonzini
            node->pfd.fd = fd;
79 a915f4bc Paolo Bonzini
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
80 e3713e00 Paolo Bonzini
81 e3713e00 Paolo Bonzini
            g_source_add_poll(&ctx->source, &node->pfd);
82 a76bab49 aliguori
        }
83 a76bab49 aliguori
        /* Update handler with latest information */
84 a76bab49 aliguori
        node->io_read = io_read;
85 a76bab49 aliguori
        node->io_write = io_write;
86 a76bab49 aliguori
        node->io_flush = io_flush;
87 a76bab49 aliguori
        node->opaque = opaque;
88 cd9ba1eb Paolo Bonzini
89 cd9ba1eb Paolo Bonzini
        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP : 0);
90 cd9ba1eb Paolo Bonzini
        node->pfd.events |= (io_write ? G_IO_OUT : 0);
91 a76bab49 aliguori
    }
92 7ed2b24c Paolo Bonzini
93 7ed2b24c Paolo Bonzini
    aio_notify(ctx);
94 9958c351 Paolo Bonzini
}
95 9958c351 Paolo Bonzini
96 a915f4bc Paolo Bonzini
void aio_set_event_notifier(AioContext *ctx,
97 a915f4bc Paolo Bonzini
                            EventNotifier *notifier,
98 a915f4bc Paolo Bonzini
                            EventNotifierHandler *io_read,
99 a915f4bc Paolo Bonzini
                            AioFlushEventNotifierHandler *io_flush)
100 a76bab49 aliguori
{
101 a915f4bc Paolo Bonzini
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
102 a915f4bc Paolo Bonzini
                       (IOHandler *)io_read, NULL,
103 a915f4bc Paolo Bonzini
                       (AioFlushHandler *)io_flush, notifier);
104 a76bab49 aliguori
}
105 a76bab49 aliguori
106 cd9ba1eb Paolo Bonzini
/*
 * Return true if any registered handler has events that would be
 * dispatched by the next aio_poll / GSource dispatch.
 */
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        /*
         * FIXME: G_IO_HUP and G_IO_ERR can never be delivered today
         * because main-loop.c is still select based (slirp legacy).
         * Once main-loop.c moves to poll, G_IO_ERR should be tested as
         * well.  Handing G_IO_ERR to both callbacks is fine, since
         * handlers must cope with spurious wakeups anyway.
         */
        int revents = node->pfd.revents & node->pfd.events;

        if (node->io_read && (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR))) {
            return true;
        }
        if (node->io_write && (revents & (G_IO_OUT | G_IO_ERR))) {
            return true;
        }
    }

    return false;
}
131 cd9ba1eb Paolo Bonzini
132 7c0628b2 Paolo Bonzini
/*
 * Run one iteration of the AioContext event loop: flush queued bottom
 * halves, dispatch revents already recorded by the GSource, then
 * select() on the registered fds (blocking only if 'blocking' is true
 * and nothing has run yet) and dispatch the resulting events.
 *
 * Returns true if progress was made, i.e. at least one callback ran.
 */
bool aio_poll(AioContext *ctx, bool blocking)
{
    /* Never written, so always zeroed: a zero timeout makes the
     * select() below a non-blocking poll. */
    static struct timeval tv0;
    AioHandler *node;
    fd_set rdfds, wrfds;
    int max_fd = -1;
    int ret;
    bool busy, progress;

    progress = false;

    /*
     * If there are callbacks left that have been queued, we need to call then.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for qemu_aio_wait loops).
     */
    if (aio_bh_poll(ctx)) {
        blocking = false;
        progress = true;
    }

    /*
     * Then dispatch any pending callbacks from the GSource.
     *
     * We have to walk very carefully in case qemu_aio_set_fd_handler is
     * called while we're walking.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;
        int revents;

        /* Pin the list: while walking_handlers != 0, concurrent removal
         * only flags nodes 'deleted' instead of unlinking them. */
        ctx->walking_handlers++;

        revents = node->pfd.revents & node->pfd.events;
        node->pfd.revents = 0;

        /* See comment in aio_pending.  */
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read) {
            node->io_read(node->opaque);
            progress = true;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write) {
            node->io_write(node->opaque);
            progress = true;
        }

        /* Advance before possibly freeing the current node. */
        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        /* Reap a deferred deletion once no walk (including ours) is in
         * progress. */
        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }

    /* A non-blocking caller is satisfied as soon as anything ran. */
    if (progress && !blocking) {
        return true;
    }

    ctx->walking_handlers++;

    FD_ZERO(&rdfds);
    FD_ZERO(&wrfds);

    /* fill fd sets */
    busy = false;
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        /* If there aren't pending AIO operations, don't invoke callbacks.
         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
         * wait indefinitely.
         */
        if (!node->deleted && node->io_flush) {
            if (node->io_flush(node->opaque) == 0) {
                continue;
            }
            busy = true;
        }
        if (!node->deleted && node->io_read) {
            FD_SET(node->pfd.fd, &rdfds);
            max_fd = MAX(max_fd, node->pfd.fd + 1);
        }
        if (!node->deleted && node->io_write) {
            FD_SET(node->pfd.fd, &wrfds);
            max_fd = MAX(max_fd, node->pfd.fd + 1);
        }
    }

    ctx->walking_handlers--;

    /* No AIO operations?  Get us out of here */
    if (!busy) {
        return progress;
    }

    /* wait until next event */
    ret = select(max_fd, &rdfds, &wrfds, NULL, blocking ? NULL : &tv0);

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        /* we have to walk very carefully in case
         * qemu_aio_set_fd_handler is called while we're walking */
        node = QLIST_FIRST(&ctx->aio_handlers);
        while (node) {
            AioHandler *tmp;

            ctx->walking_handlers++;

            if (!node->deleted &&
                FD_ISSET(node->pfd.fd, &rdfds) &&
                node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if (!node->deleted &&
                FD_ISSET(node->pfd.fd, &wrfds) &&
                node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            /* Same advance-then-maybe-free dance as the first walk. */
            tmp = node;
            node = QLIST_NEXT(node, node);

            ctx->walking_handlers--;

            if (!ctx->walking_handlers && tmp->deleted) {
                QLIST_REMOVE(tmp, node);
                g_free(tmp);
            }
        }
    }

    return progress;
}