root / aio.c @ d7585251
/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "block.h"
#include "sys-queue.h"
#include "qemu_socket.h"

typedef struct AioHandler AioHandler;

/* The list of registered AIO handlers */
static LIST_HEAD(, AioHandler) aio_handlers;

/* This is a simple lock used to protect the aio_handlers list.  Specifically,
 * it's used to ensure that no callbacks are removed while we're walking and
 * dispatching callbacks.
 */
static int walking_handlers;
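
/* One registered file descriptor.  io_read/io_write are called when the fd
 * becomes readable/writable, io_flush reports whether the fd still has
 * pending AIO requests, and deleted defers removal while the handler list
 * is being walked.
 */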
struct AioHandler
{
    int fd;
    IOHandler *io_read;
    IOHandler *io_write;
    AioFlushHandler *io_flush;
    int deleted;
    void *opaque;
    LIST_ENTRY(AioHandler) node;
};
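
/* Return the handler registered for fd, or NULL if there is none. */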
static AioHandler *find_aio_handler(int fd)
{
    AioHandler *node;

    LIST_FOREACH(node, &aio_handlers, node) {
        if (node->fd == fd)
            return node;
    }

    return NULL;
}
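
/* Register or update the AIO handler for fd.  Passing NULL for both io_read
 * and io_write unregisters the fd instead; if the handler list is currently
 * being walked, the node is only marked deleted and is freed later during
 * the walk.
 */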
int qemu_aio_set_fd_handler(int fd,
                            IOHandler *io_read,
                            IOHandler *io_write,
                            AioFlushHandler *io_flush,
                            void *opaque)
{
    AioHandler *node;

    node = find_aio_handler(fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            /* If the lock is held, just mark the node as deleted */
            if (walking_handlers)
                node->deleted = 1;
            else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                LIST_REMOVE(node, node);
                qemu_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = qemu_mallocz(sizeof(AioHandler));
            node->fd = fd;
            LIST_INSERT_HEAD(&aio_handlers, node, node);
        }
        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->io_flush = io_flush;
        node->opaque = opaque;
    }

    qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);

    return 0;
}
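
/* Wait for all outstanding AIO requests to complete: keep invoking every
 * io_flush handler and qemu_aio_wait() until no handler reports pending
 * requests.
 */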
void qemu_aio_flush(void)
{
    AioHandler *node;
    int ret;

    do {
        ret = 0;

        LIST_FOREACH(node, &aio_handlers, node) {
            ret |= node->io_flush(node->opaque);
        }

        qemu_aio_wait();
    } while (ret > 0);
}
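
/* Run one iteration of the AIO event loop: poll bottom halves first, then
 * select() on the registered fds whose io_flush callback reports pending
 * requests (fds without an io_flush callback are always included) and
 * dispatch the read/write callbacks that became ready.
 */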
void qemu_aio_wait(void)
{
    int ret;

    if (qemu_bh_poll())
        return;

    do {
        AioHandler *node;
        fd_set rdfds, wrfds;
        int max_fd = -1;

        walking_handlers = 1;

        FD_ZERO(&rdfds);
        FD_ZERO(&wrfds);

        /* fill fd sets */
        LIST_FOREACH(node, &aio_handlers, node) {
            /* If there aren't pending AIO operations, don't invoke callbacks.
             * Otherwise, if there are no AIO requests, qemu_aio_wait() would
             * wait indefinitely.
             */
            if (node->io_flush && node->io_flush(node->opaque) == 0)
                continue;

            if (!node->deleted && node->io_read) {
                FD_SET(node->fd, &rdfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
            if (!node->deleted && node->io_write) {
                FD_SET(node->fd, &wrfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
        }

        walking_handlers = 0;

        /* No AIO operations?  Get us out of here */
        if (max_fd == -1)
            break;

        /* wait until next event */
        ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);
        if (ret == -1 && errno == EINTR)
            continue;

        /* if we have any readable fds, dispatch event */
        if (ret > 0) {
            walking_handlers = 1;

            /* we have to walk very carefully in case
             * qemu_aio_set_fd_handler is called while we're walking */
            node = LIST_FIRST(&aio_handlers);
            while (node) {
                AioHandler *tmp;

                if (!node->deleted &&
                    FD_ISSET(node->fd, &rdfds) &&
                    node->io_read) {
                    node->io_read(node->opaque);
                }
                if (!node->deleted &&
                    FD_ISSET(node->fd, &wrfds) &&
                    node->io_write) {
                    node->io_write(node->opaque);
                }

                tmp = node;
                node = LIST_NEXT(node, node);

                if (tmp->deleted) {
                    LIST_REMOVE(tmp, node);
                    qemu_free(tmp);
                }
            }

            walking_handlers = 0;
        }
    } while (ret == 0);
}