Revision 7c0628b2 aio.c

b/aio.c
93 93
                       (AioFlushHandler *)io_flush, notifier);
94 94
}
95 95

  
96
bool aio_wait(AioContext *ctx)
96
bool aio_poll(AioContext *ctx, bool blocking)
97 97
{
98
    static struct timeval tv0;
98 99
    AioHandler *node;
99 100
    fd_set rdfds, wrfds;
100 101
    int max_fd = -1;
101 102
    int ret;
102
    bool busy;
103
    bool busy, progress;
104

  
105
    progress = false;
103 106

  
104 107
    /*
105 108
     * If there are callbacks left that have been queued, we need to call them.
......
107 110
     * does not need a complete flush (as is the case for qemu_aio_wait loops).
108 111
     */
109 112
    if (aio_bh_poll(ctx)) {
113
        blocking = false;
114
        progress = true;
115
    }
116

  
117
    if (progress && !blocking) {
110 118
        return true;
111 119
    }
112 120

  
......
142 150

  
143 151
    /* No AIO operations?  Get us out of here */
144 152
    if (!busy) {
145
        return false;
153
        return progress;
146 154
    }
147 155

  
148 156
    /* wait until next event */
149
    ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);
157
    ret = select(max_fd, &rdfds, &wrfds, NULL, blocking ? NULL : &tv0);
150 158

  
151 159
    /* if we have any readable fds, dispatch event */
152 160
    if (ret > 0) {
......
161 169
            if (!node->deleted &&
162 170
                FD_ISSET(node->fd, &rdfds) &&
163 171
                node->io_read) {
172
                progress = true;
164 173
                node->io_read(node->opaque);
165 174
            }
166 175
            if (!node->deleted &&
167 176
                FD_ISSET(node->fd, &wrfds) &&
168 177
                node->io_write) {
178
                progress = true;
169 179
                node->io_write(node->opaque);
170 180
            }
171 181

  
......
181 191
        }
182 192
    }
183 193

  
184
    return true;
194
    return progress;
185 195
}

Also available in: Unified diff