linux-aio.c @ a74cdab4
/*
 * Linux native AIO support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu-common.h"
#include "qemu-aio.h"
#include "block_int.h"
#include "block/raw-posix-aio.h"

#include <sys/eventfd.h>
#include <libaio.h>

/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 *      tunable by the guest.  If we get more outstanding requests at a time
 *      than this we will get EAGAIN from io_submit which is communicated to
 *      the guest as an I/O error.
 */
#define MAX_EVENTS 128

struct qemu_laiocb {
    BlockDriverAIOCB common;
    struct qemu_laio_state *ctx;
    struct iocb iocb;
    ssize_t ret;
    size_t nbytes;
    int async_context_id;
    QLIST_ENTRY(qemu_laiocb) node;
};

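/*
 * Per laio_init() state: the kernel AIO context, the eventfd used for
 * completion notification, the number of requests in flight, and the list
 * of completed requests still waiting for the right AsyncContext.
 */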
struct qemu_laio_state {
    io_context_t ctx;
    int efd;
    int count;
    QLIST_HEAD(, qemu_laiocb) completed_reqs;
};

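/*
 * Combine the two result fields of an io_event into a single signed value:
 * the byte count on success, or a negative errno on failure.
 */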
static inline ssize_t io_event_ret(struct io_event *ev)
{
    return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
}

/*
 * Completes an AIO request (calls the callback and frees the ACB).
 * Be sure to be in the right AsyncContext before calling this function.
 */
static void qemu_laio_process_completion(struct qemu_laio_state *s,
    struct qemu_laiocb *laiocb)
{
    int ret;

    s->count--;

    ret = laiocb->ret;
    if (ret != -ECANCELED) {
        if (ret == laiocb->nbytes)
            ret = 0;
        else if (ret >= 0)
            ret = -EINVAL;

        laiocb->common.cb(laiocb->common.opaque, ret);
    }

    qemu_aio_release(laiocb);
}

/*
 * Processes all queued AIO requests, i.e. requests that have returned from
 * the OS but whose callback has not been called yet. Requests whose callback
 * cannot be called in the current AsyncContext remain in the queue.
 *
 * Returns 1 if at least one request could be completed, 0 otherwise.
 */
static int qemu_laio_process_requests(void *opaque)
{
    struct qemu_laio_state *s = opaque;
    struct qemu_laiocb *laiocb, *next;
    int res = 0;

    QLIST_FOREACH_SAFE (laiocb, &s->completed_reqs, node, next) {
        if (laiocb->async_context_id == get_async_context_id()) {
            QLIST_REMOVE(laiocb, node);
            qemu_laio_process_completion(s, laiocb);
            res = 1;
        }
    }

    return res;
}

/*
 * Puts a request into the completion queue so that its callback is called
 * the next time it is possible. If we are already in the right AsyncContext,
 * the request is completed immediately instead.
 */
static void qemu_laio_enqueue_completed(struct qemu_laio_state *s,
    struct qemu_laiocb* laiocb)
{
    if (laiocb->async_context_id == get_async_context_id()) {
        qemu_laio_process_completion(s, laiocb);
    } else {
        QLIST_INSERT_HEAD(&s->completed_reqs, laiocb, node);
    }
}

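/*
 * Completion handler, called when s->efd becomes readable. The eventfd
 * counter holds the number of requests that have completed since the last
 * read, so fetch at least that many events from the io context and queue
 * each completed request.
 */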
static void qemu_laio_completion_cb(void *opaque)
{
    struct qemu_laio_state *s = opaque;

    while (1) {
        struct io_event events[MAX_EVENTS];
        uint64_t val;
        ssize_t ret;
        struct timespec ts = { 0 };
        int nevents, i;

        do {
            ret = read(s->efd, &val, sizeof(val));
        } while (ret == -1 && errno == EINTR);

        if (ret == -1 && errno == EAGAIN)
            break;

        if (ret != 8)
            break;

        do {
            nevents = io_getevents(s->ctx, val, MAX_EVENTS, events, &ts);
        } while (nevents == -EINTR);

        for (i = 0; i < nevents; i++) {
            struct iocb *iocb = events[i].obj;
            struct qemu_laiocb *laiocb =
                    container_of(iocb, struct qemu_laiocb, iocb);

            laiocb->ret = io_event_ret(&events[i]);
            qemu_laio_enqueue_completed(s, laiocb);
        }
    }
}

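/*
 * Flush handler for qemu_aio_flush()/qemu_aio_wait(): returns nonzero while
 * requests submitted through this context are still in flight.
 */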
static int qemu_laio_flush_cb(void *opaque)
{
    struct qemu_laio_state *s = opaque;

    return (s->count > 0) ? 1 : 0;
}

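/*
 * Cancel callback for the AIO pool: try io_cancel() first and, if the kernel
 * cannot cancel the request, poll for completions until it finishes.
 */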
static void laio_cancel(BlockDriverAIOCB *blockacb)
{
    struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb;
    struct io_event event;
    int ret;

    if (laiocb->ret != -EINPROGRESS)
        return;

    /*
     * Note that as of Linux 2.6.31 neither the block device code nor any
     * filesystem implements cancellation of AIO requests.
     * Thus the polling loop below is the normal code path.
     */
    ret = io_cancel(laiocb->ctx->ctx, &laiocb->iocb, &event);
    if (ret == 0) {
        laiocb->ret = -ECANCELED;
        return;
    }

    /*
     * We have to wait for the iocb to finish.
     *
     * The only way to get the iocb status update is by polling the io context.
     * We might be able to do this slightly more efficiently by removing the
     * O_NONBLOCK flag.
     */
    while (laiocb->ret == -EINPROGRESS)
        qemu_laio_completion_cb(laiocb->ctx);
}

static AIOPool laio_pool = {
    .aiocb_size         = sizeof(struct qemu_laiocb),
    .cancel             = laio_cancel,
};

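/*
 * Submit one read or write request to the kernel. On success the returned
 * ACB completes asynchronously via qemu_laio_completion_cb(); on error NULL
 * is returned and no callback is invoked.
 */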
BlockDriverAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque, int type)
{
    struct qemu_laio_state *s = aio_ctx;
    struct qemu_laiocb *laiocb;
    struct iocb *iocbs;
    off_t offset = sector_num * 512;

    laiocb = qemu_aio_get(&laio_pool, bs, cb, opaque);
    if (!laiocb)
        return NULL;
    laiocb->nbytes = nb_sectors * 512;
    laiocb->ctx = s;
    laiocb->ret = -EINPROGRESS;
    laiocb->async_context_id = get_async_context_id();

    iocbs = &laiocb->iocb;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    default:
        fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
                        __func__, type);
        goto out_free_aiocb;
    }
    io_set_eventfd(&laiocb->iocb, s->efd);
    s->count++;

    if (io_submit(s->ctx, 1, &iocbs) < 0)
        goto out_dec_count;
    return &laiocb->common;

out_dec_count:
    s->count--;
out_free_aiocb:
    qemu_aio_release(laiocb);
    return NULL;
}

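/*
 * Set up one native AIO context: an eventfd for completion notification
 * (registered with the main loop) and a kernel io context sized for
 * MAX_EVENTS in-flight requests. Returns NULL on failure.
 */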
void *laio_init(void)
{
    struct qemu_laio_state *s;

    s = qemu_mallocz(sizeof(*s));
    QLIST_INIT(&s->completed_reqs);
    s->efd = eventfd(0, 0);
    if (s->efd == -1)
        goto out_free_state;
    fcntl(s->efd, F_SETFL, O_NONBLOCK);

    if (io_setup(MAX_EVENTS, &s->ctx) != 0)
        goto out_close_efd;

    qemu_aio_set_fd_handler(s->efd, qemu_laio_completion_cb, NULL,
        qemu_laio_flush_cb, qemu_laio_process_requests, s);

    return s;

out_close_efd:
    close(s->efd);
out_free_state:
    qemu_free(s);
    return NULL;
}