Statistics
| Branch: | Revision:

root / async.c @ f53ec699

History | View | Annotate | Download (6.4 kB)

1
/*
2
 * QEMU System Emulator
3
 *
4
 * Copyright (c) 2003-2008 Fabrice Bellard
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a copy
7
 * of this software and associated documentation files (the "Software"), to deal
8
 * in the Software without restriction, including without limitation the rights
9
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10
 * copies of the Software, and to permit persons to whom the Software is
11
 * furnished to do so, subject to the following conditions:
12
 *
13
 * The above copyright notice and this permission notice shall be included in
14
 * all copies or substantial portions of the Software.
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22
 * THE SOFTWARE.
23
 */
24

    
25
#include "qemu-common.h"
26
#include "block/aio.h"
27
#include "block/thread-pool.h"
28
#include "qemu/main-loop.h"
29

    
30
/***********************************************************/
31
/* bottom halves (can be seen as timers which expire ASAP) */
32

    
33
/* A bottom half: a deferred callback, queued per-AioContext and run
 * "as soon as possible" by aio_bh_poll (idle bhs are polled less often;
 * see aio_ctx_prepare's 10 ms timeout).
 */
struct QEMUBH {
    AioContext *ctx;    /* context whose list (ctx->first_bh) holds this bh */
    QEMUBHFunc *cb;     /* callback invoked with opaque when the bh runs */
    void *opaque;       /* user data passed to cb */
    QEMUBH *next;       /* singly-linked list, newest first */
    bool scheduled;     /* queued to run; cleared by aio_bh_poll before cb */
    bool idle;          /* idle bh: only polled every 10 ms, not ASAP */
    bool deleted;       /* marked for removal; unlinked/freed in aio_bh_poll */
};
42

    
43
/* Allocate a new bottom half for @ctx and link it at the head of the
 * context's bh list.  The bh starts out unscheduled.  Ownership stays
 * with the caller until qemu_bh_delete marks it for reclamation.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *new_bh = g_malloc0(sizeof(*new_bh));

    new_bh->ctx = ctx;
    new_bh->cb = cb;
    new_bh->opaque = opaque;

    qemu_mutex_lock(&ctx->bh_lock);
    new_bh->next = ctx->first_bh;
    /* Publish: all fields above must be visible before the bh becomes
     * reachable through ctx->first_bh (lock-free readers in aio_bh_poll).
     */
    smp_wmb();
    ctx->first_bh = new_bh;
    qemu_mutex_unlock(&ctx->bh_lock);

    return new_bh;
}
58

    
59
/* Multiple occurrences of aio_bh_poll cannot be called concurrently */
/* Run every scheduled, non-deleted bottom half once.
 * Returns 1 if at least one non-idle bh ran, 0 otherwise.
 */
int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;

    /* walking_bh defers reclamation of deleted bhs while the list is being
     * traversed; the counter (rather than a flag) suggests a bh callback may
     * re-enter aio_bh_poll -- only the outermost call frees anything.
     */
    ctx->walking_bh++;

    ret = 0;
    for (bh = ctx->first_bh; bh; bh = next) {
        /* Make sure that fetching bh happens before accessing its members */
        smp_read_barrier_depends();
        /* Fetch next first: cb may delete bh (deferred via walking_bh). */
        next = bh->next;
        if (!bh->deleted && bh->scheduled) {
            bh->scheduled = 0;
            /* Paired with write barrier in bh schedule to ensure reading for
             * idle & callbacks coming after bh's scheduling.
             */
            smp_rmb();
            if (!bh->idle)
                ret = 1;    /* a non-idle bh ran: report progress */
            bh->idle = 0;
            bh->cb(bh->opaque);
        }
    }

    ctx->walking_bh--;

    /* remove deleted bhs */
    if (!ctx->walking_bh) {
        qemu_mutex_lock(&ctx->bh_lock);
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted) {
                /* Unlink and free under bh_lock so concurrent aio_bh_new
                 * cannot race with list surgery.
                 */
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        qemu_mutex_unlock(&ctx->bh_lock);
    }

    return ret;
}
105

    
106
/* Schedule @bh as an idle bottom half: it will run, but only when the
 * event loop polls idle bhs (every 10 ms at most), and it does not kick
 * the event loop.  No-op if already scheduled.
 */
void qemu_bh_schedule_idle(QEMUBH *bh)
{
    if (bh->scheduled) {
        return;
    }
    bh->idle = 1;
    /* Order matters: idle (and any state the callback reads) must be
     * written before scheduled, pairing with aio_bh_poll's read barrier.
     */
    smp_wmb();
    bh->scheduled = 1;
}
117

    
118
/* Schedule @bh to run as soon as possible and wake the event loop.
 * No-op if already scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh)
{
    if (bh->scheduled) {
        return;
    }
    bh->idle = 0;
    /* Order matters: idle (and any state the callback reads) must be
     * written before scheduled, pairing with aio_bh_poll's read barrier.
     */
    smp_wmb();
    bh->scheduled = 1;
    /* Kick the context so the bh runs promptly even if the loop is
     * blocked in poll.
     */
    aio_notify(bh->ctx);
}
130

    
131

    
132
/* This function is asynchronous: it only clears the scheduled flag.
 * NOTE(review): there is no barrier here, and an aio_bh_poll that has
 * already read scheduled==1 will presumably still invoke the callback
 * one more time -- confirm callers tolerate that.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}
138

    
139
/* This function is asynchronous: the bottom half is only marked deleted
 * here.  aio_bh_poll performs the actual unlink and g_free once no list
 * walk is in progress.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}
147

    
148
static gboolean
149
aio_ctx_prepare(GSource *source, gint    *timeout)
150
{
151
    AioContext *ctx = (AioContext *) source;
152
    QEMUBH *bh;
153

    
154
    for (bh = ctx->first_bh; bh; bh = bh->next) {
155
        if (!bh->deleted && bh->scheduled) {
156
            if (bh->idle) {
157
                /* idle bottom halves will be polled at least
158
                 * every 10ms */
159
                *timeout = 10;
160
            } else {
161
                /* non-idle bottom halves will be executed
162
                 * immediately */
163
                *timeout = 0;
164
                return true;
165
            }
166
        }
167
    }
168

    
169
    return false;
170
}
171

    
172
static gboolean
173
aio_ctx_check(GSource *source)
174
{
175
    AioContext *ctx = (AioContext *) source;
176
    QEMUBH *bh;
177

    
178
    for (bh = ctx->first_bh; bh; bh = bh->next) {
179
        if (!bh->deleted && bh->scheduled) {
180
            return true;
181
        }
182
    }
183
    return aio_pending(ctx);
184
}
185

    
186
/* GSource dispatch: run pending handlers and bottom halves.
 * Always returns true so the source stays attached.
 */
static gboolean
aio_ctx_dispatch(GSource     *source,
                 GSourceFunc  callback,
                 gpointer     user_data)
{
    AioContext *ctx = (AioContext *) source;

    /* No user callback is supported on this source. */
    assert(callback == NULL);
    /* Non-blocking: poll already happened in the glib main loop. */
    aio_poll(ctx, false);
    return true;
}
197

    
198
/* GSource finalize: tear down everything aio_context_new set up
 * (roughly in reverse order of construction).
 */
static void
aio_ctx_finalize(GSource     *source)
{
    AioContext *ctx = (AioContext *) source;

    thread_pool_free(ctx->thread_pool);
    /* NULL handler deregisters the notifier from the context. */
    aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_mutex_destroy(&ctx->bh_lock);
    g_array_free(ctx->pollfds, TRUE);
}
209

    
210
static GSourceFuncs aio_source_funcs = {
211
    aio_ctx_prepare,
212
    aio_ctx_check,
213
    aio_ctx_dispatch,
214
    aio_ctx_finalize
215
};
216

    
217
/* Return the context's GSource, handing the caller a new reference
 * (release with g_source_unref).
 */
GSource *aio_get_g_source(AioContext *ctx)
{
    GSource *source = &ctx->source;

    g_source_ref(source);
    return source;
}
222

    
223
/* Return the context's thread pool, creating it lazily on first use. */
ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (ctx->thread_pool == NULL) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}
230

    
231
/* Kick the context's event notifier so anything polling it (the glib
 * source / aio_poll) wakes up.
 */
void aio_notify(AioContext *ctx)
{
    event_notifier_set(&ctx->notifier);
}
235

    
236
AioContext *aio_context_new(void)
237
{
238
    AioContext *ctx;
239
    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
240
    ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
241
    ctx->thread_pool = NULL;
242
    qemu_mutex_init(&ctx->bh_lock);
243
    event_notifier_init(&ctx->notifier, false);
244
    aio_set_event_notifier(ctx, &ctx->notifier, 
245
                           (EventNotifierHandler *)
246
                           event_notifier_test_and_clear, NULL);
247

    
248
    return ctx;
249
}
250

    
251
/* Take a reference on @ctx; refcounting is delegated to the embedded
 * GSource.
 */
void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}
255

    
256
/* Drop a reference on @ctx; when the last one goes, the GSource machinery
 * calls aio_ctx_finalize.
 */
void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}