/* aio-win32.c @ revision 3523e4bd */

/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paolo Bonzini     <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"
struct AioHandler {
24
    EventNotifier *e;
25
    EventNotifierHandler *io_notify;
26
    GPollFD pfd;
27
    int deleted;
28
    QLIST_ENTRY(AioHandler) node;
29
};
30

    
31
void aio_set_event_notifier(AioContext *ctx,
32
                            EventNotifier *e,
33
                            EventNotifierHandler *io_notify)
34
{
35
    AioHandler *node;
36

    
37
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
38
        if (node->e == e && !node->deleted) {
39
            break;
40
        }
41
    }
42

    
43
    /* Are we deleting the fd handler? */
44
    if (!io_notify) {
45
        if (node) {
46
            g_source_remove_poll(&ctx->source, &node->pfd);
47

    
48
            /* If the lock is held, just mark the node as deleted */
49
            if (ctx->walking_handlers) {
50
                node->deleted = 1;
51
                node->pfd.revents = 0;
52
            } else {
53
                /* Otherwise, delete it for real.  We can't just mark it as
54
                 * deleted because deleted nodes are only cleaned up after
55
                 * releasing the walking_handlers lock.
56
                 */
57
                QLIST_REMOVE(node, node);
58
                g_free(node);
59
            }
60
        }
61
    } else {
62
        if (node == NULL) {
63
            /* Alloc and insert if it's not already there */
64
            node = g_malloc0(sizeof(AioHandler));
65
            node->e = e;
66
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
67
            node->pfd.events = G_IO_IN;
68
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
69

    
70
            g_source_add_poll(&ctx->source, &node->pfd);
71
        }
72
        /* Update handler with latest information */
73
        node->io_notify = io_notify;
74
    }
75

    
76
    aio_notify(ctx);
77
}
78

    
79
bool aio_pending(AioContext *ctx)
80
{
81
    AioHandler *node;
82

    
83
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
84
        if (node->pfd.revents && node->io_notify) {
85
            return true;
86
        }
87
    }
88

    
89
    return false;
90
}
91

    
92
bool aio_poll(AioContext *ctx, bool blocking)
93
{
94
    AioHandler *node;
95
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
96
    bool progress;
97
    int count;
98
    int timeout;
99

    
100
    progress = false;
101

    
102
    /*
103
     * If there are callbacks left that have been queued, we need to call then.
104
     * Do not call select in this case, because it is possible that the caller
105
     * does not need a complete flush (as is the case for qemu_aio_wait loops).
106
     */
107
    if (aio_bh_poll(ctx)) {
108
        blocking = false;
109
        progress = true;
110
    }
111

    
112
    /* Run timers */
113
    progress |= timerlistgroup_run_timers(&ctx->tlg);
114

    
115
    /*
116
     * Then dispatch any pending callbacks from the GSource.
117
     *
118
     * We have to walk very carefully in case qemu_aio_set_fd_handler is
119
     * called while we're walking.
120
     */
121
    node = QLIST_FIRST(&ctx->aio_handlers);
122
    while (node) {
123
        AioHandler *tmp;
124

    
125
        ctx->walking_handlers++;
126

    
127
        if (node->pfd.revents && node->io_notify) {
128
            node->pfd.revents = 0;
129
            node->io_notify(node->e);
130

    
131
            /* aio_notify() does not count as progress */
132
            if (node->e != &ctx->notifier) {
133
                progress = true;
134
            }
135
        }
136

    
137
        tmp = node;
138
        node = QLIST_NEXT(node, node);
139

    
140
        ctx->walking_handlers--;
141

    
142
        if (!ctx->walking_handlers && tmp->deleted) {
143
            QLIST_REMOVE(tmp, node);
144
            g_free(tmp);
145
        }
146
    }
147

    
148
    if (progress && !blocking) {
149
        return true;
150
    }
151

    
152
    ctx->walking_handlers++;
153

    
154
    /* fill fd sets */
155
    count = 0;
156
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
157
        if (!node->deleted && node->io_notify) {
158
            events[count++] = event_notifier_get_handle(node->e);
159
        }
160
    }
161

    
162
    ctx->walking_handlers--;
163

    
164
    /* wait until next event */
165
    while (count > 0) {
166
        int ret;
167

    
168
        timeout = blocking ?
169
            qemu_timeout_ns_to_ms(timerlistgroup_deadline_ns(&ctx->tlg)) : 0;
170
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
171

    
172
        /* if we have any signaled events, dispatch event */
173
        if ((DWORD) (ret - WAIT_OBJECT_0) >= count) {
174
            break;
175
        }
176

    
177
        blocking = false;
178

    
179
        /* we have to walk very carefully in case
180
         * qemu_aio_set_fd_handler is called while we're walking */
181
        node = QLIST_FIRST(&ctx->aio_handlers);
182
        while (node) {
183
            AioHandler *tmp;
184

    
185
            ctx->walking_handlers++;
186

    
187
            if (!node->deleted &&
188
                event_notifier_get_handle(node->e) == events[ret - WAIT_OBJECT_0] &&
189
                node->io_notify) {
190
                node->io_notify(node->e);
191

    
192
                /* aio_notify() does not count as progress */
193
                if (node->e != &ctx->notifier) {
194
                    progress = true;
195
                }
196
            }
197

    
198
            tmp = node;
199
            node = QLIST_NEXT(node, node);
200

    
201
            ctx->walking_handlers--;
202

    
203
            if (!ctx->walking_handlers && tmp->deleted) {
204
                QLIST_REMOVE(tmp, node);
205
                g_free(tmp);
206
            }
207
        }
208

    
209
        /* Try again, but only call each handler once.  */
210
        events[ret - WAIT_OBJECT_0] = events[--count];
211
    }
212

    
213
    if (blocking) {
214
        /* Run the timers a second time. We do this because otherwise aio_wait
215
         * will not note progress - and will stop a drain early - if we have
216
         * a timer that was not ready to run entering g_poll but is ready
217
         * after g_poll. This will only do anything if a timer has expired.
218
         */
219
        progress |= timerlistgroup_run_timers(&ctx->tlg);
220
    }
221

    
222
    return progress;
223
}