Revision 8febfa26

b/aio.c
     IOHandler *io_read;
     IOHandler *io_write;
     AioFlushHandler *io_flush;
+    AioProcessQueue *io_process_queue;
     int deleted;
     void *opaque;
     QLIST_ENTRY(AioHandler) node;
......
                             IOHandler *io_read,
                             IOHandler *io_write,
                             AioFlushHandler *io_flush,
+                            AioProcessQueue *io_process_queue,
                             void *opaque)
 {
     AioHandler *node;
......
         node->io_read = io_read;
         node->io_write = io_write;
         node->io_flush = io_flush;
+        node->io_process_queue = io_process_queue;
         node->opaque = opaque;
     }
 
......
     } while (qemu_bh_poll() || ret > 0);
 }
 
+int qemu_aio_process_queue(void)
+{
+    AioHandler *node;
+    int ret = 0;
+
+    walking_handlers = 1;
+
+    QLIST_FOREACH(node, &aio_handlers, node) {
+        if (node->io_process_queue) {
+            if (node->io_process_queue(node->opaque)) {
+                ret = 1;
+            }
+        }
+    }
+
+    walking_handlers = 0;
+
+    return ret;
+}
+
 void qemu_aio_wait(void)
 {
     int ret;
......
     if (qemu_bh_poll())
         return;
 
+    /*
+     * If there are callbacks left that have been queued, we need to call them.
+     * Return afterwards to avoid waiting needlessly in select().
+     */
+    if (qemu_aio_process_queue())
+        return;
+
     do {
         AioHandler *node;
         fd_set rdfds, wrfds;
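
The aio.c hunk above is the heart of the change: qemu_aio_process_queue() walks every registered handler and lets its backend run completions that are parked on an internal queue, and qemu_aio_wait() now returns early when that happened instead of sleeping in select(). A minimal sketch of the caller pattern this helps, using a hypothetical acb->done completion flag that is not part of this revision:

    /* Typical QEMU-style synchronous wrapper: keep servicing AIO until our
     * request finishes.  With this patch, an iteration that only ran queued
     * callbacks returns right away rather than blocking in select() until
     * the next fd event. */
    while (!acb->done) {    /* 'done' is an illustrative completion flag */
        qemu_aio_wait();
    }
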
b/block/curl.c
     dprintf("CURL (AIO): Sock action %d on fd %d\n", action, fd);
     switch (action) {
         case CURL_POLL_IN:
-            qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, NULL, s);
+            qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, NULL, NULL, s);
             break;
         case CURL_POLL_OUT:
-            qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, NULL, s);
+            qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, NULL, NULL, s);
             break;
         case CURL_POLL_INOUT:
             qemu_aio_set_fd_handler(fd, curl_multi_do,
-                                    curl_multi_do, NULL, s);
+                                    curl_multi_do, NULL, NULL, s);
             break;
         case CURL_POLL_REMOVE:
-            qemu_aio_set_fd_handler(fd, NULL, NULL, NULL, NULL);
+            qemu_aio_set_fd_handler(fd, NULL, NULL, NULL, NULL, NULL);
             break;
     }
 
b/linux-aio.c
         goto out_close_efd;
 
     qemu_aio_set_fd_handler(s->efd, qemu_laio_completion_cb,
-                            NULL, qemu_laio_flush_cb, s);
+                            NULL, qemu_laio_flush_cb, NULL, s);
 
     return s;
 
b/posix-aio-compat.c
     fcntl(s->rfd, F_SETFL, O_NONBLOCK);
     fcntl(s->wfd, F_SETFL, O_NONBLOCK);
 
-    qemu_aio_set_fd_handler(s->rfd, posix_aio_read, NULL, posix_aio_flush, s);
+    qemu_aio_set_fd_handler(s->rfd, posix_aio_read, NULL, posix_aio_flush,
+        posix_aio_process_queue, s);
 
     ret = pthread_attr_init(&attr);
     if (ret)
b/qemu-aio.h
 /* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
 typedef int (AioFlushHandler)(void *opaque);
 
+/* Runs all currently allowed AIO callbacks of completed requests in the
+ * respective AIO backend. Returns 0 if no requests were handled, non-zero
+ * if at least one queued request was handled. */
+typedef int (AioProcessQueue)(void *opaque);
+
 /* Flush any pending AIO operation. This function will block until all
  * outstanding AIO operations have been completed or cancelled. */
 void qemu_aio_flush(void);
......
  * result of executing I/O completion or bh callbacks. */
 void qemu_aio_wait(void);
 
+/*
+ * Runs all currently allowed AIO callbacks of completed requests. Returns 0
+ * if no requests were handled, non-zero if at least one request was
+ * processed.
+ */
+int qemu_aio_process_queue(void);
+
 /* Register a file descriptor and associated callbacks.  Behaves very similarly
  * to qemu_set_fd_handler2.  Unlike qemu_set_fd_handler2, these callbacks will
  * be invoked when using either qemu_aio_wait() or qemu_aio_flush().
......
                             IOHandler *io_read,
                             IOHandler *io_write,
                             AioFlushHandler *io_flush,
+                            AioProcessQueue *io_process_queue,
                             void *opaque);
 
 #endif
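
Putting the qemu-aio.h pieces together: a backend that parks completed requests on an internal queue now passes a fifth callback when registering its file descriptor, exactly as posix-aio-compat.c does above. A minimal sketch of that wiring, using hypothetical my_backend_* names rather than code from this revision:

    #include "qemu-aio.h"

    /* Hypothetical backend state, used only to illustrate the new callback. */
    typedef struct MyBackendState {
        int fd;      /* fd the worker threads signal completions on */
        int queued;  /* completions parked on an internal queue */
    } MyBackendState;

    static void my_backend_read_cb(void *opaque)
    {
        /* drain the fd and move finished requests onto the queue ... */
    }

    /* AioFlushHandler: return 1 while requests are still outstanding. */
    static int my_backend_flush_cb(void *opaque)
    {
        MyBackendState *s = opaque;
        return s->queued > 0;
    }

    /* AioProcessQueue: run queued completion callbacks and report whether
     * anything was handled, so qemu_aio_wait() can skip select(). */
    static int my_backend_process_queue_cb(void *opaque)
    {
        MyBackendState *s = opaque;
        int did_work = (s->queued > 0);
        /* ... invoke the queued completion callbacks here ... */
        s->queued = 0;
        return did_work;
    }

    static void my_backend_register(MyBackendState *s)
    {
        qemu_aio_set_fd_handler(s->fd, my_backend_read_cb, NULL,
                                my_backend_flush_cb,
                                my_backend_process_queue_cb, s);
    }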
