@@ -47,11 +47,16 @@
     bh->ctx = ctx;
     bh->cb = cb;
     bh->opaque = opaque;
+    qemu_mutex_lock(&ctx->bh_lock);
     bh->next = ctx->first_bh;
+    /* Make sure that the members are ready before putting bh into list */
+    smp_wmb();
     ctx->first_bh = bh;
+    qemu_mutex_unlock(&ctx->bh_lock);
     return bh;
 }
 
+/* Multiple occurrences of aio_bh_poll cannot be called concurrently */
 int aio_bh_poll(AioContext *ctx)
 {
     QEMUBH *bh, **bhp, *next;
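The barrier here implements a publish pattern: all of bh's fields are initialized before the store to ctx->first_bh makes the node reachable, so readers that traverse the list without taking bh_lock never observe a half-constructed QEMUBH; bh_lock itself only serializes concurrent writers. A minimal standalone sketch of the same pattern, using a C11 release store in place of smp_wmb() plus the assignment (names here are illustrative, not QEMU's):

    /* publish.c -- illustrative sketch, not QEMU code */
    #include <stdatomic.h>
    #include <stdlib.h>

    struct node {
        int payload;                      /* stands in for cb/opaque/ctx */
        struct node *next;
    };

    static _Atomic(struct node *) head;   /* stands in for ctx->first_bh */

    void publish(int payload)
    {
        struct node *n = malloc(sizeof(*n));
        n->payload = payload;             /* initialize every field...   */
        n->next = atomic_load_explicit(&head, memory_order_relaxed);
        /* ...then make the node reachable.  The release store plays the
         * role of smp_wmb() followed by the head assignment: all stores
         * above it are visible to any reader that sees the new head. */
        atomic_store_explicit(&head, n, memory_order_release);
    }

With more than one publisher the load/store pair on head would race, which is exactly what bh_lock prevents in the patch above.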
@@ -61,9 +66,15 @@
 
     ret = 0;
     for (bh = ctx->first_bh; bh; bh = next) {
+        /* Make sure that fetching bh happens before accessing its members */
+        smp_read_barrier_depends();
         next = bh->next;
         if (!bh->deleted && bh->scheduled) {
             bh->scheduled = 0;
+            /* Paired with the write barrier in bh schedule, so that idle and
+             * the callback data are read only after bh->scheduled.
+             */
+            smp_rmb();
             if (!bh->idle)
                 ret = 1;
             bh->idle = 0;
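The two read barriers have distinct partners: smp_read_barrier_depends() pairs with the smp_wmb() in aio_bh_new (fields before list insertion), while smp_rmb() pairs with the smp_wmb() added to the schedule functions below (idle and callback data before scheduled). A sketch of the reader side of that second protocol, modeled with a C11 acquire load (illustrative names, not QEMU's API):

    /* poll_one.c -- illustrative sketch, not QEMU code */
    #include <stdatomic.h>
    #include <stdbool.h>

    struct bh {
        atomic_bool scheduled;
        bool idle;                 /* written before scheduled is set */
    };

    /* Returns true when a non-idle bottom half made progress. */
    bool poll_one(struct bh *bh)
    {
        /* Acquire pairs with the release store in the scheduler: once we
         * observe scheduled == true, the idle value written before it is
         * visible too -- the same guarantee smp_rmb() gives above. */
        if (!atomic_load_explicit(&bh->scheduled, memory_order_acquire)) {
            return false;
        }
        atomic_store_explicit(&bh->scheduled, false, memory_order_relaxed);
        bool was_idle = bh->idle;
        bh->idle = false;
        return !was_idle;
    }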
@@ -75,6 +86,7 @@
 
     /* remove deleted bhs */
     if (!ctx->walking_bh) {
+        qemu_mutex_lock(&ctx->bh_lock);
         bhp = &ctx->first_bh;
         while (*bhp) {
             bh = *bhp;
@@ -85,6 +97,7 @@
                 bhp = &bh->next;
             }
         }
+        qemu_mutex_unlock(&ctx->bh_lock);
     }
 
     return ret;
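The sweep only runs when no traversal is in flight (walking_bh guards against nested aio_bh_poll invocations), and taking bh_lock around it excludes a concurrent aio_bh_new() from inserting while links are being rewritten. The unlink-via-pointer-to-pointer idiom in a standalone sketch, with a pthread mutex standing in for bh_lock (names illustrative):

    /* sweep.c -- illustrative sketch, not QEMU code */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct node {
        bool deleted;
        struct node *next;
    };

    static struct node *first;
    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    void sweep(void)
    {
        pthread_mutex_lock(&list_lock);
        /* p always points at the link that targets the current node, so
         * unlinking is a single store through *p -- no prev pointer. */
        for (struct node **p = &first; *p; ) {
            struct node *n = *p;
            if (n->deleted) {
                *p = n->next;
                free(n);
            } else {
                p = &n->next;
            }
        }
        pthread_mutex_unlock(&list_lock);
    }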
@@ -94,24 +107,38 @@
 {
     if (bh->scheduled)
         return;
-    bh->scheduled = 1;
     bh->idle = 1;
+    /* Make sure that idle and any writes needed by the callback are done
+     * before the locations are read in aio_bh_poll.
+     */
+    smp_wmb();
+    bh->scheduled = 1;
 }
 
 void qemu_bh_schedule(QEMUBH *bh)
 {
     if (bh->scheduled)
         return;
-    bh->scheduled = 1;
     bh->idle = 0;
+    /* Make sure that idle and any writes needed by the callback are done
+     * before the locations are read in aio_bh_poll.
+     */
+    smp_wmb();
+    bh->scheduled = 1;
     aio_notify(bh->ctx);
 }
 
+
+/* This function is asynchronous: a running callback is not waited for.
+ */
 void qemu_bh_cancel(QEMUBH *bh)
 {
     bh->scheduled = 0;
 }
 
+/* This function is asynchronous: the bottom half is actually deleted
+ * later, during the removal sweep in aio_bh_poll.
+ */
 void qemu_bh_delete(QEMUBH *bh)
 {
     bh->scheduled = 0;
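Both schedule variants make the same fix: the store to bh->scheduled moves from before bh->idle to after the smp_wmb(), so scheduled acts as the publication flag, and a poller that sees it set also sees the right idle value. The writer side of the protocol sketched earlier, again with a C11 release store modeling smp_wmb() (illustrative names):

    /* schedule.c -- illustrative sketch, not QEMU code */
    #include <stdatomic.h>
    #include <stdbool.h>

    struct bh {
        atomic_bool scheduled;
        bool idle;
    };

    void schedule(struct bh *bh, bool idle)
    {
        if (atomic_load_explicit(&bh->scheduled, memory_order_relaxed)) {
            return;               /* already pending, nothing to do */
        }
        bh->idle = idle;          /* payload first...                */
        /* ...flag last: the release store plays the role of smp_wmb()
         * followed by the assignment to bh->scheduled. */
        atomic_store_explicit(&bh->scheduled, true, memory_order_release);
    }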
@@ -176,6 +203,7 @@
     thread_pool_free(ctx->thread_pool);
     aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
     event_notifier_cleanup(&ctx->notifier);
+    qemu_mutex_destroy(&ctx->bh_lock);
     g_array_free(ctx->pollfds, TRUE);
 }
 
@@ -211,6 +239,7 @@
     ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
     ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
     ctx->thread_pool = NULL;
+    qemu_mutex_init(&ctx->bh_lock);
     event_notifier_init(&ctx->notifier, false);
     aio_set_event_notifier(ctx, &ctx->notifier,
                            (EventNotifierHandler *)
|