root / async.c @ e685b4eb
History | View | Annotate | Download (5.9 kB)
1 | 4f999d05 | Kevin Wolf | /*
|
---|---|---|---|
2 | 4f999d05 | Kevin Wolf | * QEMU System Emulator
|
3 | 4f999d05 | Kevin Wolf | *
|
4 | 4f999d05 | Kevin Wolf | * Copyright (c) 2003-2008 Fabrice Bellard
|
5 | 4f999d05 | Kevin Wolf | *
|
6 | 4f999d05 | Kevin Wolf | * Permission is hereby granted, free of charge, to any person obtaining a copy
|
7 | 4f999d05 | Kevin Wolf | * of this software and associated documentation files (the "Software"), to deal
|
8 | 4f999d05 | Kevin Wolf | * in the Software without restriction, including without limitation the rights
|
9 | 4f999d05 | Kevin Wolf | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
10 | 4f999d05 | Kevin Wolf | * copies of the Software, and to permit persons to whom the Software is
|
11 | 4f999d05 | Kevin Wolf | * furnished to do so, subject to the following conditions:
|
12 | 4f999d05 | Kevin Wolf | *
|
13 | 4f999d05 | Kevin Wolf | * The above copyright notice and this permission notice shall be included in
|
14 | 4f999d05 | Kevin Wolf | * all copies or substantial portions of the Software.
|
15 | 4f999d05 | Kevin Wolf | *
|
16 | 4f999d05 | Kevin Wolf | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
17 | 4f999d05 | Kevin Wolf | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
18 | 4f999d05 | Kevin Wolf | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
19 | 4f999d05 | Kevin Wolf | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
20 | 4f999d05 | Kevin Wolf | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
21 | 4f999d05 | Kevin Wolf | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
22 | 4f999d05 | Kevin Wolf | * THE SOFTWARE.
|
23 | 4f999d05 | Kevin Wolf | */
|
24 | 4f999d05 | Kevin Wolf | |
25 | 4f999d05 | Kevin Wolf | #include "qemu-common.h" |
26 | 9a1e9481 | Kevin Wolf | #include "qemu-aio.h" |
27 | 9a1e9481 | Kevin Wolf | |
28 | 9a1e9481 | Kevin Wolf | /*
|
29 | 9a1e9481 | Kevin Wolf | * An AsyncContext protects the callbacks of AIO requests and Bottom Halves
|
30 | 9a1e9481 | Kevin Wolf | * against interfering with each other. A typical example is qcow2 that accepts
|
31 | 9a1e9481 | Kevin Wolf | * asynchronous requests, but relies for manipulation of its metadata on
|
32 | 9a1e9481 | Kevin Wolf | * synchronous bdrv_read/write that doesn't trigger any callbacks.
|
33 | 9a1e9481 | Kevin Wolf | *
|
34 | 9a1e9481 | Kevin Wolf | * However, these functions are often emulated using AIO which means that AIO
|
35 | 9a1e9481 | Kevin Wolf | * callbacks must be run - but at the same time we must not run callbacks of
|
36 | 9a1e9481 | Kevin Wolf | * other requests as they might start to modify metadata and corrupt the
|
37 | 9a1e9481 | Kevin Wolf | * internal state of the caller of bdrv_read/write.
|
38 | 9a1e9481 | Kevin Wolf | *
|
39 | 9a1e9481 | Kevin Wolf | * To achieve the desired semantics we switch into a new AsyncContext.
|
40 | 9a1e9481 | Kevin Wolf | * Callbacks must only be run if they belong to the current AsyncContext.
|
41 | 9a1e9481 | Kevin Wolf | * Otherwise they need to be queued until their own context is active again.
|
42 | 9a1e9481 | Kevin Wolf | * This is how you can make qemu_aio_wait() wait only for your own callbacks.
|
43 | 9a1e9481 | Kevin Wolf | *
|
44 | 9a1e9481 | Kevin Wolf | * The AsyncContexts form a stack. When you leave an AsyncContext, you always
|
45 | 9a1e9481 | Kevin Wolf | * return to the old ("parent") context.
|
46 | 9a1e9481 | Kevin Wolf | */
|
struct AsyncContext {
    /* Consecutive number of the AsyncContext (position in the stack);
     * the root context has id 0, each pushed child gets parent->id + 1 */
    int id;

    /* Anchor of the singly linked list of Bottom Halves belonging to this
     * context; new BHs are prepended (see qemu_bh_new), so newest first */
    struct QEMUBH *first_bh;

    /* Link to parent context; NULL only for the root context */
    struct AsyncContext *parent;
};
57 | 9a1e9481 | Kevin Wolf | |
/* The currently active AsyncContext. Initially points at a statically
 * allocated, zero-initialized root context (id 0, no parent); the root must
 * never be popped (async_context_pop() aborts on stack underflow). */
static struct AsyncContext *async_context = &(struct AsyncContext) { 0 };
61 | 9a1e9481 | Kevin Wolf | /*
|
62 | 9a1e9481 | Kevin Wolf | * Enter a new AsyncContext. Already scheduled Bottom Halves and AIO callbacks
|
63 | 9a1e9481 | Kevin Wolf | * won't be called until this context is left again.
|
64 | 9a1e9481 | Kevin Wolf | */
|
65 | 9a1e9481 | Kevin Wolf | void async_context_push(void) |
66 | 9a1e9481 | Kevin Wolf | { |
67 | 9a1e9481 | Kevin Wolf | struct AsyncContext *new = qemu_mallocz(sizeof(*new)); |
68 | 9a1e9481 | Kevin Wolf | new->parent = async_context; |
69 | 9a1e9481 | Kevin Wolf | new->id = async_context->id + 1;
|
70 | 9a1e9481 | Kevin Wolf | async_context = new; |
71 | 9a1e9481 | Kevin Wolf | } |
72 | 9a1e9481 | Kevin Wolf | |
73 | 9a1e9481 | Kevin Wolf | /* Run queued AIO completions and destroy Bottom Half */
|
74 | 9a1e9481 | Kevin Wolf | static void bh_run_aio_completions(void *opaque) |
75 | 9a1e9481 | Kevin Wolf | { |
76 | 9a1e9481 | Kevin Wolf | QEMUBH **bh = opaque; |
77 | 9a1e9481 | Kevin Wolf | qemu_bh_delete(*bh); |
78 | 9a1e9481 | Kevin Wolf | qemu_free(bh); |
79 | 9a1e9481 | Kevin Wolf | qemu_aio_process_queue(); |
80 | 9a1e9481 | Kevin Wolf | } |
81 | 9a1e9481 | Kevin Wolf | /*
|
82 | 9a1e9481 | Kevin Wolf | * Leave the currently active AsyncContext. All Bottom Halves belonging to the
|
83 | 9a1e9481 | Kevin Wolf | * old context are executed before changing the context.
|
84 | 9a1e9481 | Kevin Wolf | */
|
85 | 9a1e9481 | Kevin Wolf | void async_context_pop(void) |
86 | 9a1e9481 | Kevin Wolf | { |
87 | 9a1e9481 | Kevin Wolf | struct AsyncContext *old = async_context;
|
88 | 9a1e9481 | Kevin Wolf | QEMUBH **bh; |
89 | 9a1e9481 | Kevin Wolf | |
90 | 9a1e9481 | Kevin Wolf | /* Flush the bottom halves, we don't want to lose them */
|
91 | 9a1e9481 | Kevin Wolf | while (qemu_bh_poll());
|
92 | 9a1e9481 | Kevin Wolf | |
93 | 9a1e9481 | Kevin Wolf | /* Switch back to the parent context */
|
94 | 9a1e9481 | Kevin Wolf | async_context = async_context->parent; |
95 | 9a1e9481 | Kevin Wolf | qemu_free(old); |
96 | 9a1e9481 | Kevin Wolf | |
97 | 9a1e9481 | Kevin Wolf | if (async_context == NULL) { |
98 | 9a1e9481 | Kevin Wolf | abort(); |
99 | 9a1e9481 | Kevin Wolf | } |
100 | 9a1e9481 | Kevin Wolf | |
101 | 9a1e9481 | Kevin Wolf | /* Schedule BH to run any queued AIO completions as soon as possible */
|
102 | 9a1e9481 | Kevin Wolf | bh = qemu_malloc(sizeof(*bh));
|
103 | 9a1e9481 | Kevin Wolf | *bh = qemu_bh_new(bh_run_aio_completions, bh); |
104 | 9a1e9481 | Kevin Wolf | qemu_bh_schedule(*bh); |
105 | 9a1e9481 | Kevin Wolf | } |
106 | 9a1e9481 | Kevin Wolf | |
/*
 * Returns the ID of the currently active AsyncContext: 0 for the root
 * context, incremented by one for each nested async_context_push().
 */
int get_async_context_id(void)
{
    return async_context->id;
}
114 | 4f999d05 | Kevin Wolf | |
/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

struct QEMUBH {
    QEMUBHFunc *cb;   /* callback invoked when the BH runs */
    void *opaque;     /* opaque argument passed to cb */
    int scheduled;    /* nonzero if pending execution */
    int idle;         /* nonzero if scheduled via qemu_bh_schedule_idle */
    int deleted;      /* deletion is deferred; freed during qemu_bh_poll() */
    QEMUBH *next;     /* next BH in the owning AsyncContext's list */
};
126 | 4f999d05 | Kevin Wolf | |
127 | 4f999d05 | Kevin Wolf | QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
|
128 | 4f999d05 | Kevin Wolf | { |
129 | 4f999d05 | Kevin Wolf | QEMUBH *bh; |
130 | 4f999d05 | Kevin Wolf | bh = qemu_mallocz(sizeof(QEMUBH));
|
131 | 4f999d05 | Kevin Wolf | bh->cb = cb; |
132 | 4f999d05 | Kevin Wolf | bh->opaque = opaque; |
133 | 9a1e9481 | Kevin Wolf | bh->next = async_context->first_bh; |
134 | 9a1e9481 | Kevin Wolf | async_context->first_bh = bh; |
135 | 4f999d05 | Kevin Wolf | return bh;
|
136 | 4f999d05 | Kevin Wolf | } |
137 | 4f999d05 | Kevin Wolf | |
138 | 4f999d05 | Kevin Wolf | int qemu_bh_poll(void) |
139 | 4f999d05 | Kevin Wolf | { |
140 | 4f999d05 | Kevin Wolf | QEMUBH *bh, **bhp; |
141 | 4f999d05 | Kevin Wolf | int ret;
|
142 | 4f999d05 | Kevin Wolf | |
143 | 4f999d05 | Kevin Wolf | ret = 0;
|
144 | 9a1e9481 | Kevin Wolf | for (bh = async_context->first_bh; bh; bh = bh->next) {
|
145 | 4f999d05 | Kevin Wolf | if (!bh->deleted && bh->scheduled) {
|
146 | 4f999d05 | Kevin Wolf | bh->scheduled = 0;
|
147 | 4f999d05 | Kevin Wolf | if (!bh->idle)
|
148 | 4f999d05 | Kevin Wolf | ret = 1;
|
149 | 4f999d05 | Kevin Wolf | bh->idle = 0;
|
150 | 4f999d05 | Kevin Wolf | bh->cb(bh->opaque); |
151 | 4f999d05 | Kevin Wolf | } |
152 | 4f999d05 | Kevin Wolf | } |
153 | 4f999d05 | Kevin Wolf | |
154 | 4f999d05 | Kevin Wolf | /* remove deleted bhs */
|
155 | 9a1e9481 | Kevin Wolf | bhp = &async_context->first_bh; |
156 | 4f999d05 | Kevin Wolf | while (*bhp) {
|
157 | 4f999d05 | Kevin Wolf | bh = *bhp; |
158 | 4f999d05 | Kevin Wolf | if (bh->deleted) {
|
159 | 4f999d05 | Kevin Wolf | *bhp = bh->next; |
160 | 4f999d05 | Kevin Wolf | qemu_free(bh); |
161 | 4f999d05 | Kevin Wolf | } else
|
162 | 4f999d05 | Kevin Wolf | bhp = &bh->next; |
163 | 4f999d05 | Kevin Wolf | } |
164 | 4f999d05 | Kevin Wolf | |
165 | 4f999d05 | Kevin Wolf | return ret;
|
166 | 4f999d05 | Kevin Wolf | } |
167 | 4f999d05 | Kevin Wolf | |
168 | 4f999d05 | Kevin Wolf | void qemu_bh_schedule_idle(QEMUBH *bh)
|
169 | 4f999d05 | Kevin Wolf | { |
170 | 4f999d05 | Kevin Wolf | if (bh->scheduled)
|
171 | 4f999d05 | Kevin Wolf | return;
|
172 | 4f999d05 | Kevin Wolf | bh->scheduled = 1;
|
173 | 4f999d05 | Kevin Wolf | bh->idle = 1;
|
174 | 4f999d05 | Kevin Wolf | } |
175 | 4f999d05 | Kevin Wolf | |
176 | 4f999d05 | Kevin Wolf | void qemu_bh_schedule(QEMUBH *bh)
|
177 | 4f999d05 | Kevin Wolf | { |
178 | 4f999d05 | Kevin Wolf | if (bh->scheduled)
|
179 | 4f999d05 | Kevin Wolf | return;
|
180 | 4f999d05 | Kevin Wolf | bh->scheduled = 1;
|
181 | 4f999d05 | Kevin Wolf | bh->idle = 0;
|
182 | 4f999d05 | Kevin Wolf | /* stop the currently executing CPU to execute the BH ASAP */
|
183 | 4f999d05 | Kevin Wolf | qemu_notify_event(); |
184 | 4f999d05 | Kevin Wolf | } |
185 | 4f999d05 | Kevin Wolf | |
/*
 * Unschedule bh: a pending callback will not be invoked. The BH itself stays
 * allocated and may be scheduled again later.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}
190 | 4f999d05 | Kevin Wolf | |
/*
 * Cancel and delete bh. Deletion is deferred: the BH is only flagged here and
 * actually freed by the sweep in qemu_bh_poll(), so it is safe to call this
 * from within the BH's own callback.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}
196 | 4f999d05 | Kevin Wolf | |
197 | 4f999d05 | Kevin Wolf | void qemu_bh_update_timeout(int *timeout) |
198 | 4f999d05 | Kevin Wolf | { |
199 | 4f999d05 | Kevin Wolf | QEMUBH *bh; |
200 | 4f999d05 | Kevin Wolf | |
201 | 9a1e9481 | Kevin Wolf | for (bh = async_context->first_bh; bh; bh = bh->next) {
|
202 | 4f999d05 | Kevin Wolf | if (!bh->deleted && bh->scheduled) {
|
203 | 4f999d05 | Kevin Wolf | if (bh->idle) {
|
204 | 4f999d05 | Kevin Wolf | /* idle bottom halves will be polled at least
|
205 | 4f999d05 | Kevin Wolf | * every 10ms */
|
206 | 4f999d05 | Kevin Wolf | *timeout = MIN(10, *timeout);
|
207 | 4f999d05 | Kevin Wolf | } else {
|
208 | 4f999d05 | Kevin Wolf | /* non-idle bottom halves will be executed
|
209 | 4f999d05 | Kevin Wolf | * immediately */
|
210 | 4f999d05 | Kevin Wolf | *timeout = 0;
|
211 | 4f999d05 | Kevin Wolf | break;
|
212 | 4f999d05 | Kevin Wolf | } |
213 | 4f999d05 | Kevin Wolf | } |
214 | 4f999d05 | Kevin Wolf | } |
215 | 4f999d05 | Kevin Wolf | } |