Statistics
| Branch: | Revision:

root / async.c @ cba933b2

History | View | Annotate | Download (6.9 kB)

1 4f999d05 Kevin Wolf
/*
2 4f999d05 Kevin Wolf
 * QEMU System Emulator
3 4f999d05 Kevin Wolf
 *
4 4f999d05 Kevin Wolf
 * Copyright (c) 2003-2008 Fabrice Bellard
5 4f999d05 Kevin Wolf
 *
6 4f999d05 Kevin Wolf
 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 4f999d05 Kevin Wolf
 * of this software and associated documentation files (the "Software"), to deal
8 4f999d05 Kevin Wolf
 * in the Software without restriction, including without limitation the rights
9 4f999d05 Kevin Wolf
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 4f999d05 Kevin Wolf
 * copies of the Software, and to permit persons to whom the Software is
11 4f999d05 Kevin Wolf
 * furnished to do so, subject to the following conditions:
12 4f999d05 Kevin Wolf
 *
13 4f999d05 Kevin Wolf
 * The above copyright notice and this permission notice shall be included in
14 4f999d05 Kevin Wolf
 * all copies or substantial portions of the Software.
15 4f999d05 Kevin Wolf
 *
16 4f999d05 Kevin Wolf
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 4f999d05 Kevin Wolf
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 4f999d05 Kevin Wolf
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 4f999d05 Kevin Wolf
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 4f999d05 Kevin Wolf
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 4f999d05 Kevin Wolf
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 4f999d05 Kevin Wolf
 * THE SOFTWARE.
23 4f999d05 Kevin Wolf
 */
24 4f999d05 Kevin Wolf
25 4f999d05 Kevin Wolf
#include "qemu-common.h"
26 737e150e Paolo Bonzini
#include "block/aio.h"
27 9b34277d Stefan Hajnoczi
#include "block/thread-pool.h"
28 1de7afc9 Paolo Bonzini
#include "qemu/main-loop.h"
29 9a1e9481 Kevin Wolf
30 4f999d05 Kevin Wolf
/***********************************************************/
31 4f999d05 Kevin Wolf
/* bottom halves (can be seen as timers which expire ASAP) */
32 4f999d05 Kevin Wolf
33 4f999d05 Kevin Wolf
/* A bottom half: a callback deferred for execution "as soon as possible"
 * in the owning AioContext's event loop.  Instances live on the context's
 * singly-linked first_bh list and are reclaimed lazily by aio_bh_poll.
 */
struct QEMUBH {
    AioContext *ctx;        /* context whose loop runs this bottom half */
    QEMUBHFunc *cb;         /* callback invoked by aio_bh_poll */
    void *opaque;           /* argument passed to cb */
    QEMUBH *next;           /* next entry in ctx->first_bh list */
    bool scheduled;         /* pending execution on the next poll */
    bool idle;              /* low-priority: may be delayed up to ~10 ms */
    bool deleted;           /* unlink and free once no poll is walking the list */
};
42 4f999d05 Kevin Wolf
43 f627aab1 Paolo Bonzini
/* Allocate a new bottom half bound to @ctx and prepend it to the context's
 * list.  The returned bh is not scheduled; callers arm it with
 * qemu_bh_schedule() and release it with qemu_bh_delete().
 * Safe to call from any thread: insertion is serialized by ctx->bh_lock,
 * and the write barrier makes the fully-initialized bh visible to the
 * lock-free reader in aio_bh_poll before it appears on the list.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_malloc0(sizeof(QEMUBH));
    bh->ctx = ctx;
    bh->cb = cb;
    bh->opaque = opaque;
    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);
    return bh;
}
58 4f999d05 Kevin Wolf
59 dcc772e2 Liu Ping Fan
/* Multiple occurrences of aio_bh_poll cannot be called concurrently */
60 f627aab1 Paolo Bonzini
int aio_bh_poll(AioContext *ctx)
61 4f999d05 Kevin Wolf
{
62 7887f620 Kevin Wolf
    QEMUBH *bh, **bhp, *next;
63 4f999d05 Kevin Wolf
    int ret;
64 648fb0ea Kevin Wolf
65 f627aab1 Paolo Bonzini
    ctx->walking_bh++;
66 4f999d05 Kevin Wolf
67 4f999d05 Kevin Wolf
    ret = 0;
68 f627aab1 Paolo Bonzini
    for (bh = ctx->first_bh; bh; bh = next) {
69 dcc772e2 Liu Ping Fan
        /* Make sure that fetching bh happens before accessing its members */
70 dcc772e2 Liu Ping Fan
        smp_read_barrier_depends();
71 7887f620 Kevin Wolf
        next = bh->next;
72 4f999d05 Kevin Wolf
        if (!bh->deleted && bh->scheduled) {
73 4f999d05 Kevin Wolf
            bh->scheduled = 0;
74 dcc772e2 Liu Ping Fan
            /* Paired with write barrier in bh schedule to ensure reading for
75 dcc772e2 Liu Ping Fan
             * idle & callbacks coming after bh's scheduling.
76 dcc772e2 Liu Ping Fan
             */
77 dcc772e2 Liu Ping Fan
            smp_rmb();
78 4f999d05 Kevin Wolf
            if (!bh->idle)
79 4f999d05 Kevin Wolf
                ret = 1;
80 4f999d05 Kevin Wolf
            bh->idle = 0;
81 4f999d05 Kevin Wolf
            bh->cb(bh->opaque);
82 4f999d05 Kevin Wolf
        }
83 4f999d05 Kevin Wolf
    }
84 4f999d05 Kevin Wolf
85 f627aab1 Paolo Bonzini
    ctx->walking_bh--;
86 648fb0ea Kevin Wolf
87 4f999d05 Kevin Wolf
    /* remove deleted bhs */
88 f627aab1 Paolo Bonzini
    if (!ctx->walking_bh) {
89 dcc772e2 Liu Ping Fan
        qemu_mutex_lock(&ctx->bh_lock);
90 f627aab1 Paolo Bonzini
        bhp = &ctx->first_bh;
91 648fb0ea Kevin Wolf
        while (*bhp) {
92 648fb0ea Kevin Wolf
            bh = *bhp;
93 648fb0ea Kevin Wolf
            if (bh->deleted) {
94 648fb0ea Kevin Wolf
                *bhp = bh->next;
95 648fb0ea Kevin Wolf
                g_free(bh);
96 648fb0ea Kevin Wolf
            } else {
97 648fb0ea Kevin Wolf
                bhp = &bh->next;
98 648fb0ea Kevin Wolf
            }
99 648fb0ea Kevin Wolf
        }
100 dcc772e2 Liu Ping Fan
        qemu_mutex_unlock(&ctx->bh_lock);
101 4f999d05 Kevin Wolf
    }
102 4f999d05 Kevin Wolf
103 4f999d05 Kevin Wolf
    return ret;
104 4f999d05 Kevin Wolf
}
105 4f999d05 Kevin Wolf
106 4f999d05 Kevin Wolf
/* Schedule @bh as an "idle" bottom half: it will run, but may be delayed
 * by up to ~10 ms (see aio_ctx_prepare) and does not wake the event loop.
 * No-op if already scheduled.
 */
void qemu_bh_schedule_idle(QEMUBH *bh)
{
    if (bh->scheduled) {
        return;
    }
    bh->idle = 1;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in the aio_bh_poll.
     */
    smp_wmb();
    bh->scheduled = 1;
}
117 4f999d05 Kevin Wolf
118 4f999d05 Kevin Wolf
/* Schedule @bh for execution on the next iteration of its context's event
 * loop and wake the loop via aio_notify so it runs promptly.  No-op if
 * already scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh)
{
    if (bh->scheduled) {
        return;
    }
    bh->idle = 0;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in the aio_bh_poll.
     */
    smp_wmb();
    bh->scheduled = 1;
    aio_notify(bh->ctx);
}
130 4f999d05 Kevin Wolf
131 dcc772e2 Liu Ping Fan
132 dcc772e2 Liu Ping Fan
/* This function is asynchronous: it only clears the scheduled flag.  A
 * concurrent aio_bh_poll may already have observed the flag and may still
 * invoke (or be invoking) the callback.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}
138 4f999d05 Kevin Wolf
139 dcc772e2 Liu Ping Fan
/* This function is asynchronous: it only marks the bottom half for
 * deletion.  The actual unlinking and g_free happen later, at the end of
 * aio_bh_poll, once no traversal of the list is in progress.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}
147 4f999d05 Kevin Wolf
148 22bfa75e Paolo Bonzini
/* GSource prepare callback: decide whether the source is already ready to
 * dispatch and, if not, how long the main loop may sleep (*timeout, ms;
 * -1 means "no limit").  Ready immediately (returns true) when a non-idle
 * bottom half is scheduled or a timer has already expired; idle bottom
 * halves only cap the sleep at 10 ms.
 */
static gboolean
aio_ctx_prepare(GSource *source, gint    *timeout)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    int deadline;

    /* We assume there is no timeout already supplied */
    *timeout = -1;
    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            if (bh->idle) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                *timeout = 10;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                *timeout = 0;
                return true;
            }
        }
    }

    /* Fold in the nearest timer deadline (ms); 0 means already expired. */
    deadline = qemu_timeout_ns_to_ms(timerlistgroup_deadline_ns(&ctx->tlg));
    if (deadline == 0) {
        *timeout = 0;
        return true;
    } else {
        *timeout = qemu_soonest_timeout(*timeout, deadline);
    }

    return false;
}
182 e3713e00 Paolo Bonzini
183 e3713e00 Paolo Bonzini
static gboolean
184 e3713e00 Paolo Bonzini
aio_ctx_check(GSource *source)
185 e3713e00 Paolo Bonzini
{
186 e3713e00 Paolo Bonzini
    AioContext *ctx = (AioContext *) source;
187 e3713e00 Paolo Bonzini
    QEMUBH *bh;
188 e3713e00 Paolo Bonzini
189 e3713e00 Paolo Bonzini
    for (bh = ctx->first_bh; bh; bh = bh->next) {
190 e3713e00 Paolo Bonzini
        if (!bh->deleted && bh->scheduled) {
191 e3713e00 Paolo Bonzini
            return true;
192 e3713e00 Paolo Bonzini
        }
193 e3713e00 Paolo Bonzini
    }
194 533a8cf3 Alex Bligh
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
195 e3713e00 Paolo Bonzini
}
196 e3713e00 Paolo Bonzini
197 e3713e00 Paolo Bonzini
static gboolean
198 e3713e00 Paolo Bonzini
aio_ctx_dispatch(GSource     *source,
199 e3713e00 Paolo Bonzini
                 GSourceFunc  callback,
200 e3713e00 Paolo Bonzini
                 gpointer     user_data)
201 e3713e00 Paolo Bonzini
{
202 e3713e00 Paolo Bonzini
    AioContext *ctx = (AioContext *) source;
203 e3713e00 Paolo Bonzini
204 e3713e00 Paolo Bonzini
    assert(callback == NULL);
205 e3713e00 Paolo Bonzini
    aio_poll(ctx, false);
206 e3713e00 Paolo Bonzini
    return true;
207 e3713e00 Paolo Bonzini
}
208 e3713e00 Paolo Bonzini
209 2f4dc3c1 Paolo Bonzini
/* GSource finalize callback: tear down the AioContext when its last
 * reference is dropped.  Order matters: the thread pool is freed first
 * (it may still complete work through the context), then the event
 * notifier is detached before being cleaned up, and only then are the
 * remaining resources released.
 */
static void
aio_ctx_finalize(GSource     *source)
{
    AioContext *ctx = (AioContext *) source;

    thread_pool_free(ctx->thread_pool);
    /* Detach the notifier's fd handler before destroying the notifier. */
    aio_set_event_notifier(ctx, &ctx->notifier, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_mutex_destroy(&ctx->bh_lock);
    g_array_free(ctx->pollfds, TRUE);
    timerlistgroup_deinit(&ctx->tlg);
}
221 2f4dc3c1 Paolo Bonzini
222 e3713e00 Paolo Bonzini
static GSourceFuncs aio_source_funcs = {
223 e3713e00 Paolo Bonzini
    aio_ctx_prepare,
224 e3713e00 Paolo Bonzini
    aio_ctx_check,
225 e3713e00 Paolo Bonzini
    aio_ctx_dispatch,
226 2f4dc3c1 Paolo Bonzini
    aio_ctx_finalize
227 e3713e00 Paolo Bonzini
};
228 e3713e00 Paolo Bonzini
229 e3713e00 Paolo Bonzini
/* Return the context's embedded GSource with a fresh reference; the caller
 * owns it and must g_source_unref when done.
 */
GSource *aio_get_g_source(AioContext *ctx)
{
    GSource *src = &ctx->source;

    g_source_ref(src);
    return src;
}
234 a915f4bc Paolo Bonzini
235 9b34277d Stefan Hajnoczi
/* Return the context's thread pool, creating it lazily on first use.
 * NOTE(review): the check-then-create is unsynchronized — presumably only
 * called from the context's own thread; confirm against callers.
 */
ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (ctx->thread_pool == NULL) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}
242 9b34277d Stefan Hajnoczi
243 2f4dc3c1 Paolo Bonzini
/* Wake the context's event loop out of a blocking poll by signalling its
 * event notifier.
 */
void aio_notify(AioContext *ctx)
{
    event_notifier_set(&ctx->notifier);
}
247 2f4dc3c1 Paolo Bonzini
248 d5541d86 Alex Bligh
static void aio_timerlist_notify(void *opaque)
249 d5541d86 Alex Bligh
{
250 d5541d86 Alex Bligh
    aio_notify(opaque);
251 d5541d86 Alex Bligh
}
252 d5541d86 Alex Bligh
253 f627aab1 Paolo Bonzini
/* Create a new AioContext.  The context is allocated as a GSource (so its
 * lifetime is reference-counted via aio_context_ref/unref) and comes with
 * its event notifier already registered so aio_notify can wake the loop.
 */
AioContext *aio_context_new(void)
{
    AioContext *ctx;
    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
    ctx->thread_pool = NULL;    /* created lazily by aio_get_thread_pool */
    qemu_mutex_init(&ctx->bh_lock);
    event_notifier_init(&ctx->notifier, false);
    /* The notifier's handler merely clears the "kicked" state. */
    aio_set_event_notifier(ctx, &ctx->notifier, 
                           (EventNotifierHandler *)
                           event_notifier_test_and_clear);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    return ctx;
}
268 e3713e00 Paolo Bonzini
269 e3713e00 Paolo Bonzini
/* Take a reference on @ctx; lifetime is managed by the embedded GSource's
 * refcount.
 */
void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}
273 e3713e00 Paolo Bonzini
274 e3713e00 Paolo Bonzini
/* Drop a reference on @ctx; when the last one goes away the GSource
 * machinery invokes aio_ctx_finalize.
 */
void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}