aio.c @ 08c573a8
/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block.h"
#include "qemu-queue.h"
#include "qemu_socket.h"

typedef struct AioHandler AioHandler;

/* The list of registered AIO handlers */
static QLIST_HEAD(, AioHandler) aio_handlers;

/* This is a simple lock used to protect the aio_handlers list.  Specifically,
 * it's used to ensure that no callbacks are removed while we're walking and
 * dispatching callbacks.
 */
static int walking_handlers;

struct AioHandler
{
    int fd;
    IOHandler *io_read;
    IOHandler *io_write;
    AioFlushHandler *io_flush;
    int deleted;
    void *opaque;
    QLIST_ENTRY(AioHandler) node;
};

static AioHandler *find_aio_handler(int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &aio_handlers, node) {
        if (node->fd == fd)
            if (!node->deleted)
                return node;
    }

    return NULL;
}

int qemu_aio_set_fd_handler(int fd,
                            IOHandler *io_read,
                            IOHandler *io_write,
                            AioFlushHandler *io_flush,
                            void *opaque)
{
    AioHandler *node;

    node = find_aio_handler(fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            /* If the lock is held, just mark the node as deleted */
            if (walking_handlers)
                node->deleted = 1;
            else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_malloc0(sizeof(AioHandler));
            node->fd = fd;
            QLIST_INSERT_HEAD(&aio_handlers, node, node);
        }
        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->io_flush = io_flush;
        node->opaque = opaque;
    }

    qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);

    return 0;
}
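
/*
 * Illustrative sketch, not part of the original file: a block driver that
 * completes requests on an eventfd or pipe would typically register it with
 * hypothetical callbacks (my_completion_cb, my_flush_cb) and a driver state
 * pointer, and later unregister by passing NULL handlers:
 *
 *     qemu_aio_set_fd_handler(s->fd, my_completion_cb, NULL, my_flush_cb, s);
 *     ...
 *     qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL);
 *
 * my_flush_cb() is expected to return non-zero while requests are still
 * pending on this fd, so qemu_aio_wait() knows the fd is busy.
 */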

void qemu_aio_flush(void)
{
    while (qemu_aio_wait());
}

bool qemu_aio_wait(void)
{
    AioHandler *node;
    fd_set rdfds, wrfds;
    int max_fd = -1;
    int ret;
    bool busy;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for qemu_aio_wait loops).
     */
    if (qemu_bh_poll()) {
        return true;
    }

    walking_handlers = 1;

    FD_ZERO(&rdfds);
    FD_ZERO(&wrfds);

    /* fill fd sets */
    busy = false;
    QLIST_FOREACH(node, &aio_handlers, node) {
        /* If this fd has no pending AIO operations, don't add it to the
         * fd sets.  Otherwise, when no AIO requests are outstanding,
         * qemu_aio_wait() would wait indefinitely.
         */
        if (node->io_flush) {
            if (node->io_flush(node->opaque) == 0) {
                continue;
            }
            busy = true;
        }
        if (!node->deleted && node->io_read) {
            FD_SET(node->fd, &rdfds);
            max_fd = MAX(max_fd, node->fd + 1);
        }
        if (!node->deleted && node->io_write) {
            FD_SET(node->fd, &wrfds);
            max_fd = MAX(max_fd, node->fd + 1);
        }
    }

    walking_handlers = 0;

    /* No AIO operations?  Get us out of here */
    if (!busy) {
        return false;
    }

    /* wait until next event */
    ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        walking_handlers = 1;

        /* we have to walk very carefully in case
         * qemu_aio_set_fd_handler is called while we're walking */
        node = QLIST_FIRST(&aio_handlers);
        while (node) {
            AioHandler *tmp;

            if (!node->deleted &&
                FD_ISSET(node->fd, &rdfds) &&
                node->io_read) {
                node->io_read(node->opaque);
            }
            if (!node->deleted &&
                FD_ISSET(node->fd, &wrfds) &&
                node->io_write) {
                node->io_write(node->opaque);
            }

            tmp = node;
            node = QLIST_NEXT(node, node);

            if (tmp->deleted) {
                QLIST_REMOVE(tmp, node);
                g_free(tmp);
            }
        }

        walking_handlers = 0;
    }

    return true;
}
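
/*
 * Illustrative sketch, not part of the original file: callers that must block
 * until one specific request completes typically spin on qemu_aio_wait(),
 * checking their own completion flag (a hypothetical "done" field here):
 *
 *     while (!acb->done) {
 *         qemu_aio_wait();
 *     }
 *
 * qemu_aio_flush() above is the global variant: it keeps calling
 * qemu_aio_wait() until no registered io_flush handler reports pending
 * requests and no bottom halves fire, i.e. until qemu_aio_wait() returns
 * false.
 */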