root / aio.c @ 97aff481
/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "block.h"
#include "sys-queue.h"
#include "qemu_socket.h"

typedef struct AioHandler AioHandler;

/* The list of registered AIO handlers */
static LIST_HEAD(, AioHandler) aio_handlers;

/* This is a simple lock used to protect the aio_handlers list.  Specifically,
 * it's used to ensure that no callbacks are removed while we're walking and
 * dispatching callbacks.
 */
static int walking_handlers;

struct AioHandler
{
    int fd;
    IOHandler *io_read;
    IOHandler *io_write;
    AioFlushHandler *io_flush;
    int deleted;        /* marked instead of freed while the list is walked */
    void *opaque;
    LIST_ENTRY(AioHandler) node;
};

/* Return the live (non-deleted) handler registered for fd, if any */
static AioHandler *find_aio_handler(int fd)
{
    AioHandler *node;

    LIST_FOREACH(node, &aio_handlers, node) {
        if (node->fd == fd && !node->deleted)
            return node;
    }

    return NULL;
}

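/* Register an AIO handler for fd, or update the one already registered.
 *
 * io_read/io_write are invoked from qemu_aio_wait() when fd becomes readable
 * or writable; io_flush is polled to ask whether the handler still has
 * requests in flight.  Passing NULL for both io_read and io_write removes the
 * handler for fd.  Always returns 0.
 *
 * Illustrative example (not from this file; MyState, my_read_cb and the
 * in_flight counter are hypothetical):
 *
 *     static int my_aio_flush(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         return s->in_flight > 0;    (non-zero while requests are pending)
 *     }
 *
 *     qemu_aio_set_fd_handler(s->fd, my_read_cb, NULL, my_aio_flush, s);
 *     ...
 *     qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL);
 */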
int qemu_aio_set_fd_handler(int fd,
                            IOHandler *io_read,
                            IOHandler *io_write,
                            AioFlushHandler *io_flush,
                            void *opaque)
{
    AioHandler *node;

    node = find_aio_handler(fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            /* If the lock is held, just mark the node as deleted */
            if (walking_handlers)
                node->deleted = 1;
            else {
                /* Otherwise, delete it for real.  Deleted nodes are only
                 * reaped by the walking code in qemu_aio_wait(), and since no
                 * walk is in progress it is safe to free the node right away.
                 */
                LIST_REMOVE(node, node);
                qemu_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = qemu_mallocz(sizeof(AioHandler));
            node->fd = fd;
            LIST_INSERT_HEAD(&aio_handlers, node, node);
        }
        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->io_flush = io_flush;
        node->opaque = opaque;
    }

    qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);

    return 0;
}

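/* Wait until no registered handler reports outstanding AIO requests.
 *
 * The io_flush callbacks are polled and qemu_aio_wait() is run repeatedly
 * until none of them reports requests in flight.
 */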
void qemu_aio_flush(void)
{
    AioHandler *node;
    int ret;

    do {
        ret = 0;

        LIST_FOREACH(node, &aio_handlers, node) {
            /* io_flush may be NULL for handlers that never have requests in
             * flight; qemu_aio_wait() makes the same check */
            if (node->io_flush) {
                ret |= node->io_flush(node->opaque);
            }
        }

        qemu_aio_wait();
    } while (ret > 0);
}

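/* Run pending bottom halves, or wait for and dispatch one round of AIO
 * completions.
 *
 * If a bottom half runs, return immediately.  Otherwise build fd sets from
 * the handlers whose io_flush callback reports requests in flight, select()
 * on them, dispatch the read/write callbacks of ready descriptors, and free
 * any node that was marked deleted while the list was being walked.  Returns
 * once there is nothing to wait for or at least one event has been handled.
 */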
void qemu_aio_wait(void)
{
    int ret;

    if (qemu_bh_poll())
        return;

    do {
        AioHandler *node;
        fd_set rdfds, wrfds;
        int max_fd = -1;    /* nfds argument for select(): highest fd + 1 */

        walking_handlers = 1;

        FD_ZERO(&rdfds);
        FD_ZERO(&wrfds);

        /* fill fd sets */
        LIST_FOREACH(node, &aio_handlers, node) {
            /* If this handler has no pending AIO operations, leave its fd out
             * of the sets; otherwise select() below could block indefinitely
             * waiting for an event that will never arrive.
             */
            if (node->io_flush && node->io_flush(node->opaque) == 0)
                continue;

            if (!node->deleted && node->io_read) {
                FD_SET(node->fd, &rdfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
            if (!node->deleted && node->io_write) {
                FD_SET(node->fd, &wrfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
        }

        walking_handlers = 0;

        /* No AIO operations?  Get us out of here */
        if (max_fd == -1)
            break;

        /* wait until next event */
        ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);
        if (ret == -1 && errno == EINTR)
            continue;

        /* if we have any readable fds, dispatch event */
        if (ret > 0) {
            walking_handlers = 1;

            /* we have to walk very carefully in case
             * qemu_aio_set_fd_handler is called while we're walking */
            node = LIST_FIRST(&aio_handlers);
            while (node) {
                AioHandler *tmp;

                if (!node->deleted &&
                    FD_ISSET(node->fd, &rdfds) &&
                    node->io_read) {
                    node->io_read(node->opaque);
                }
                if (!node->deleted &&
                    FD_ISSET(node->fd, &wrfds) &&
                    node->io_write) {
                    node->io_write(node->opaque);
                }

                tmp = node;
                node = LIST_NEXT(node, node);

                /* reap nodes that were unregistered during the walk */
                if (tmp->deleted) {
                    LIST_REMOVE(tmp, node);
                    qemu_free(tmp);
                }
            }

            walking_handlers = 0;
        }
    } while (ret == 0);
}
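
/* Deferred-deletion note (summary of the code above): while walking_handlers
 * is set, qemu_aio_set_fd_handler(fd, NULL, NULL, NULL, NULL) only flags the
 * node as deleted; the dispatch loop in qemu_aio_wait() skips flagged nodes
 * and frees them once it has moved past them.  This allows a callback to
 * unregister its own fd safely from inside qemu_aio_wait().
 */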