root / linux-aio.c @ 5bc89ef6
History | View | Annotate | Download (5 kB)
1 | 5c6c3a6c | Christoph Hellwig | /*
|
---|---|---|---|
2 | 5c6c3a6c | Christoph Hellwig | * Linux native AIO support.
|
3 | 5c6c3a6c | Christoph Hellwig | *
|
4 | 5c6c3a6c | Christoph Hellwig | * Copyright (C) 2009 IBM, Corp.
|
5 | 5c6c3a6c | Christoph Hellwig | * Copyright (C) 2009 Red Hat, Inc.
|
6 | 5c6c3a6c | Christoph Hellwig | *
|
7 | 5c6c3a6c | Christoph Hellwig | * This work is licensed under the terms of the GNU GPL, version 2 or later.
|
8 | 5c6c3a6c | Christoph Hellwig | * See the COPYING file in the top-level directory.
|
9 | 5c6c3a6c | Christoph Hellwig | */
|
10 | 5c6c3a6c | Christoph Hellwig | #include "qemu-common.h" |
11 | 5c6c3a6c | Christoph Hellwig | #include "qemu-aio.h" |
12 | 5c6c3a6c | Christoph Hellwig | #include "block_int.h" |
13 | 5c6c3a6c | Christoph Hellwig | #include "block/raw-posix-aio.h" |
14 | 5c6c3a6c | Christoph Hellwig | |
15 | 5c6c3a6c | Christoph Hellwig | #include <sys/eventfd.h> |
16 | 5c6c3a6c | Christoph Hellwig | #include <libaio.h> |
17 | 5c6c3a6c | Christoph Hellwig | |
/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 * tunable by the guest.  If we get more outstanding requests at a time
 * than this we will get EAGAIN from io_submit which is communicated to
 * the guest as an I/O error.
 */
#define MAX_EVENTS 128
/* Per-request state for a native AIO request. */
struct qemu_laiocb {
    BlockDriverAIOCB common;     /* must be first: laio_cancel() casts
                                    BlockDriverAIOCB* to qemu_laiocb* */
    struct qemu_laio_state *ctx; /* owning AIO state (for io_cancel/poll) */
    struct iocb iocb;            /* kernel iocb; container_of() maps back
                                    from io_event.obj to this struct */
    ssize_t ret;                 /* -EINPROGRESS while in flight, then the
                                    completion result or -ECANCELED */
    size_t nbytes;               /* expected transfer size in bytes */
};
35 | 5c6c3a6c | Christoph Hellwig | |
/* Per-device native AIO context. */
struct qemu_laio_state {
    io_context_t ctx;   /* kernel AIO context from io_setup() */
    int efd;            /* non-blocking eventfd signalled on completion */
    int count;          /* number of in-flight requests; drives flush_cb */
};
41 | 5c6c3a6c | Christoph Hellwig | |
42 | 5c6c3a6c | Christoph Hellwig | static inline ssize_t io_event_ret(struct io_event *ev) |
43 | 5c6c3a6c | Christoph Hellwig | { |
44 | 5c6c3a6c | Christoph Hellwig | return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res); |
45 | 5c6c3a6c | Christoph Hellwig | } |
46 | 5c6c3a6c | Christoph Hellwig | |
47 | 5c6c3a6c | Christoph Hellwig | static void qemu_laio_completion_cb(void *opaque) |
48 | 5c6c3a6c | Christoph Hellwig | { |
49 | 5c6c3a6c | Christoph Hellwig | struct qemu_laio_state *s = opaque;
|
50 | 5c6c3a6c | Christoph Hellwig | |
51 | 5c6c3a6c | Christoph Hellwig | while (1) { |
52 | 5c6c3a6c | Christoph Hellwig | struct io_event events[MAX_EVENTS];
|
53 | 5c6c3a6c | Christoph Hellwig | uint64_t val; |
54 | 5c6c3a6c | Christoph Hellwig | ssize_t ret; |
55 | 5c6c3a6c | Christoph Hellwig | struct timespec ts = { 0 }; |
56 | 5c6c3a6c | Christoph Hellwig | int nevents, i;
|
57 | 5c6c3a6c | Christoph Hellwig | |
58 | 5c6c3a6c | Christoph Hellwig | do {
|
59 | 5c6c3a6c | Christoph Hellwig | ret = read(s->efd, &val, sizeof(val));
|
60 | 5c6c3a6c | Christoph Hellwig | } while (ret == 1 && errno == EINTR); |
61 | 5c6c3a6c | Christoph Hellwig | |
62 | 5c6c3a6c | Christoph Hellwig | if (ret == -1 && errno == EAGAIN) |
63 | 5c6c3a6c | Christoph Hellwig | break;
|
64 | 5c6c3a6c | Christoph Hellwig | |
65 | 5c6c3a6c | Christoph Hellwig | if (ret != 8) |
66 | 5c6c3a6c | Christoph Hellwig | break;
|
67 | 5c6c3a6c | Christoph Hellwig | |
68 | 5c6c3a6c | Christoph Hellwig | do {
|
69 | 5c6c3a6c | Christoph Hellwig | nevents = io_getevents(s->ctx, val, MAX_EVENTS, events, &ts); |
70 | 5c6c3a6c | Christoph Hellwig | } while (nevents == -EINTR);
|
71 | 5c6c3a6c | Christoph Hellwig | |
72 | 5c6c3a6c | Christoph Hellwig | for (i = 0; i < nevents; i++) { |
73 | 5c6c3a6c | Christoph Hellwig | struct iocb *iocb = events[i].obj;
|
74 | 5c6c3a6c | Christoph Hellwig | struct qemu_laiocb *laiocb =
|
75 | 5c6c3a6c | Christoph Hellwig | container_of(iocb, struct qemu_laiocb, iocb);
|
76 | 5c6c3a6c | Christoph Hellwig | |
77 | 5c6c3a6c | Christoph Hellwig | s->count--; |
78 | 5c6c3a6c | Christoph Hellwig | |
79 | 5c6c3a6c | Christoph Hellwig | ret = laiocb->ret = io_event_ret(&events[i]); |
80 | 5c6c3a6c | Christoph Hellwig | if (ret != -ECANCELED) {
|
81 | 5c6c3a6c | Christoph Hellwig | if (ret == laiocb->nbytes)
|
82 | 5c6c3a6c | Christoph Hellwig | ret = 0;
|
83 | 5c6c3a6c | Christoph Hellwig | else if (ret >= 0) |
84 | 5c6c3a6c | Christoph Hellwig | ret = -EINVAL; |
85 | 5c6c3a6c | Christoph Hellwig | |
86 | 5c6c3a6c | Christoph Hellwig | laiocb->common.cb(laiocb->common.opaque, ret); |
87 | 5c6c3a6c | Christoph Hellwig | } |
88 | 5c6c3a6c | Christoph Hellwig | |
89 | 5c6c3a6c | Christoph Hellwig | qemu_aio_release(laiocb); |
90 | 5c6c3a6c | Christoph Hellwig | } |
91 | 5c6c3a6c | Christoph Hellwig | } |
92 | 5c6c3a6c | Christoph Hellwig | } |
93 | 5c6c3a6c | Christoph Hellwig | |
94 | 5c6c3a6c | Christoph Hellwig | static int qemu_laio_flush_cb(void *opaque) |
95 | 5c6c3a6c | Christoph Hellwig | { |
96 | 5c6c3a6c | Christoph Hellwig | struct qemu_laio_state *s = opaque;
|
97 | 5c6c3a6c | Christoph Hellwig | |
98 | 5c6c3a6c | Christoph Hellwig | return (s->count > 0) ? 1 : 0; |
99 | 5c6c3a6c | Christoph Hellwig | } |
100 | 5c6c3a6c | Christoph Hellwig | |
101 | 5c6c3a6c | Christoph Hellwig | static void laio_cancel(BlockDriverAIOCB *blockacb) |
102 | 5c6c3a6c | Christoph Hellwig | { |
103 | 5c6c3a6c | Christoph Hellwig | struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb; |
104 | 5c6c3a6c | Christoph Hellwig | struct io_event event;
|
105 | 5c6c3a6c | Christoph Hellwig | int ret;
|
106 | 5c6c3a6c | Christoph Hellwig | |
107 | 5c6c3a6c | Christoph Hellwig | if (laiocb->ret != -EINPROGRESS)
|
108 | 5c6c3a6c | Christoph Hellwig | return;
|
109 | 5c6c3a6c | Christoph Hellwig | |
110 | 5c6c3a6c | Christoph Hellwig | /*
|
111 | 5c6c3a6c | Christoph Hellwig | * Note that as of Linux 2.6.31 neither the block device code nor any
|
112 | 5c6c3a6c | Christoph Hellwig | * filesystem implements cancellation of AIO request.
|
113 | 5c6c3a6c | Christoph Hellwig | * Thus the polling loop below is the normal code path.
|
114 | 5c6c3a6c | Christoph Hellwig | */
|
115 | 5c6c3a6c | Christoph Hellwig | ret = io_cancel(laiocb->ctx->ctx, &laiocb->iocb, &event); |
116 | 5c6c3a6c | Christoph Hellwig | if (ret == 0) { |
117 | 5c6c3a6c | Christoph Hellwig | laiocb->ret = -ECANCELED; |
118 | 5c6c3a6c | Christoph Hellwig | return;
|
119 | 5c6c3a6c | Christoph Hellwig | } |
120 | 5c6c3a6c | Christoph Hellwig | |
121 | 5c6c3a6c | Christoph Hellwig | /*
|
122 | 5c6c3a6c | Christoph Hellwig | * We have to wait for the iocb to finish.
|
123 | 5c6c3a6c | Christoph Hellwig | *
|
124 | 5c6c3a6c | Christoph Hellwig | * The only way to get the iocb status update is by polling the io context.
|
125 | 5c6c3a6c | Christoph Hellwig | * We might be able to do this slightly more optimal by removing the
|
126 | 5c6c3a6c | Christoph Hellwig | * O_NONBLOCK flag.
|
127 | 5c6c3a6c | Christoph Hellwig | */
|
128 | 5c6c3a6c | Christoph Hellwig | while (laiocb->ret == -EINPROGRESS)
|
129 | 5c6c3a6c | Christoph Hellwig | qemu_laio_completion_cb(laiocb->ctx); |
130 | 5c6c3a6c | Christoph Hellwig | } |
131 | 5c6c3a6c | Christoph Hellwig | |
/* AIOCB pool descriptor: allocation size and cancel hook for laio requests */
static AIOPool laio_pool = {
    .aiocb_size         = sizeof(struct qemu_laiocb),
    .cancel             = laio_cancel,
};
136 | 5c6c3a6c | Christoph Hellwig | |
137 | 5c6c3a6c | Christoph Hellwig | BlockDriverAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd, |
138 | 5c6c3a6c | Christoph Hellwig | int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
|
139 | 5c6c3a6c | Christoph Hellwig | BlockDriverCompletionFunc *cb, void *opaque, int type) |
140 | 5c6c3a6c | Christoph Hellwig | { |
141 | 5c6c3a6c | Christoph Hellwig | struct qemu_laio_state *s = aio_ctx;
|
142 | 5c6c3a6c | Christoph Hellwig | struct qemu_laiocb *laiocb;
|
143 | 5c6c3a6c | Christoph Hellwig | struct iocb *iocbs;
|
144 | 5c6c3a6c | Christoph Hellwig | off_t offset = sector_num * 512;
|
145 | 5c6c3a6c | Christoph Hellwig | |
146 | 5c6c3a6c | Christoph Hellwig | laiocb = qemu_aio_get(&laio_pool, bs, cb, opaque); |
147 | 5c6c3a6c | Christoph Hellwig | if (!laiocb)
|
148 | 5c6c3a6c | Christoph Hellwig | return NULL; |
149 | 5c6c3a6c | Christoph Hellwig | laiocb->nbytes = nb_sectors * 512;
|
150 | 5c6c3a6c | Christoph Hellwig | laiocb->ctx = s; |
151 | 5c6c3a6c | Christoph Hellwig | laiocb->ret = -EINPROGRESS; |
152 | 5c6c3a6c | Christoph Hellwig | |
153 | 5c6c3a6c | Christoph Hellwig | iocbs = &laiocb->iocb; |
154 | 5c6c3a6c | Christoph Hellwig | |
155 | 5c6c3a6c | Christoph Hellwig | switch (type) {
|
156 | 5c6c3a6c | Christoph Hellwig | case QEMU_AIO_WRITE:
|
157 | 5c6c3a6c | Christoph Hellwig | io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset); |
158 | 5c6c3a6c | Christoph Hellwig | break;
|
159 | 5c6c3a6c | Christoph Hellwig | case QEMU_AIO_READ:
|
160 | 5c6c3a6c | Christoph Hellwig | io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset); |
161 | 5c6c3a6c | Christoph Hellwig | break;
|
162 | 5c6c3a6c | Christoph Hellwig | default:
|
163 | 5c6c3a6c | Christoph Hellwig | fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
|
164 | 5c6c3a6c | Christoph Hellwig | __func__, type); |
165 | 5c6c3a6c | Christoph Hellwig | goto out_free_aiocb;
|
166 | 5c6c3a6c | Christoph Hellwig | } |
167 | 5c6c3a6c | Christoph Hellwig | io_set_eventfd(&laiocb->iocb, s->efd); |
168 | 5c6c3a6c | Christoph Hellwig | s->count++; |
169 | 5c6c3a6c | Christoph Hellwig | |
170 | 5c6c3a6c | Christoph Hellwig | if (io_submit(s->ctx, 1, &iocbs) < 0) |
171 | 5c6c3a6c | Christoph Hellwig | goto out_dec_count;
|
172 | 5c6c3a6c | Christoph Hellwig | return &laiocb->common;
|
173 | 5c6c3a6c | Christoph Hellwig | |
174 | 5c6c3a6c | Christoph Hellwig | out_free_aiocb:
|
175 | 5c6c3a6c | Christoph Hellwig | qemu_aio_release(laiocb); |
176 | 5c6c3a6c | Christoph Hellwig | out_dec_count:
|
177 | 5c6c3a6c | Christoph Hellwig | s->count--; |
178 | 5c6c3a6c | Christoph Hellwig | return NULL; |
179 | 5c6c3a6c | Christoph Hellwig | } |
180 | 5c6c3a6c | Christoph Hellwig | |
181 | 5c6c3a6c | Christoph Hellwig | void *laio_init(void) |
182 | 5c6c3a6c | Christoph Hellwig | { |
183 | 5c6c3a6c | Christoph Hellwig | struct qemu_laio_state *s;
|
184 | 5c6c3a6c | Christoph Hellwig | |
185 | 5c6c3a6c | Christoph Hellwig | s = qemu_mallocz(sizeof(*s));
|
186 | 5c6c3a6c | Christoph Hellwig | s->efd = eventfd(0, 0); |
187 | 5c6c3a6c | Christoph Hellwig | if (s->efd == -1) |
188 | 5c6c3a6c | Christoph Hellwig | goto out_free_state;
|
189 | 5c6c3a6c | Christoph Hellwig | fcntl(s->efd, F_SETFL, O_NONBLOCK); |
190 | 5c6c3a6c | Christoph Hellwig | |
191 | 5c6c3a6c | Christoph Hellwig | if (io_setup(MAX_EVENTS, &s->ctx) != 0) |
192 | 5c6c3a6c | Christoph Hellwig | goto out_close_efd;
|
193 | 5c6c3a6c | Christoph Hellwig | |
194 | 5c6c3a6c | Christoph Hellwig | qemu_aio_set_fd_handler(s->efd, qemu_laio_completion_cb, |
195 | 5c6c3a6c | Christoph Hellwig | NULL, qemu_laio_flush_cb, s);
|
196 | 5c6c3a6c | Christoph Hellwig | |
197 | 5c6c3a6c | Christoph Hellwig | return s;
|
198 | 5c6c3a6c | Christoph Hellwig | |
199 | 5c6c3a6c | Christoph Hellwig | out_close_efd:
|
200 | 5c6c3a6c | Christoph Hellwig | close(s->efd); |
201 | 5c6c3a6c | Christoph Hellwig | out_free_state:
|
202 | 5c6c3a6c | Christoph Hellwig | qemu_free(s); |
203 | 5c6c3a6c | Christoph Hellwig | return NULL; |
204 | 5c6c3a6c | Christoph Hellwig | } |