/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "qemu/timer.h"
#include "qemu/sockets.h"        // struct in_addr needed for libslirp.h
#include "slirp/libslirp.h"
#include "qemu/main-loop.h"
#include "block/aio.h"

#ifndef _WIN32

#include "qemu/compatfd.h"

/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (intptr_t)opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);

        if (len == -1 && errno == EAGAIN) {
            break;
        }

        if (len != sizeof(info)) {
            printf("read from sigfd returned %zd: %m\n", len);
            return;
        }

        sigaction(info.ssi_signo, NULL, &action);
        if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction) {
            action.sa_sigaction(info.ssi_signo,
                                (siginfo_t *)&info, NULL);
        } else if (action.sa_handler) {
            action.sa_handler(info.ssi_signo);
        }
    }
}

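/* Block the signals the main loop cares about and route them through a
 * non-blocking signalfd whose readiness is handled by sigfd_handler().
 */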
static int qemu_signal_init(void)
{
    int sigfd;
    sigset_t set;

    /*
     * SIG_IPI must be blocked in the main thread and must not be caught
     * by sigwait() in the signal thread. Otherwise, the cpu thread will
     * not catch it reliably.
     */
    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIGBUS);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    sigdelset(&set, SIG_IPI);
    sigfd = qemu_signalfd(&set);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl_setfl(sigfd, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(intptr_t)sigfd);

    return 0;
}

#else /* _WIN32 */

static int qemu_signal_init(void)
{
    return 0;
}
#endif

static AioContext *qemu_aio_context;

AioContext *qemu_get_aio_context(void)
{
    return qemu_aio_context;
}

void qemu_notify_event(void)
{
    if (!qemu_aio_context) {
        return;
    }
    aio_notify(qemu_aio_context);
}

static GArray *gpollfds;

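/* Set up the main loop: signal handling, the shared GPollFD array, and the
 * global AioContext, whose GSource is attached to the default GLib main
 * context so bottom halves and AIO handlers are dispatched from the loop.
 */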
int qemu_init_main_loop(void)
{
    int ret;
    GSource *src;

    init_clocks();

    ret = qemu_signal_init();
    if (ret) {
        return ret;
    }

    gpollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
    qemu_aio_context = aio_context_new();
    src = aio_get_g_source(qemu_aio_context);
    g_source_attach(src, NULL);
    g_source_unref(src);
    return 0;
}

static int max_priority;

#ifndef _WIN32
static int glib_pollfds_idx;
static int glib_n_poll_fds;

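/* Merge the default GLib context's poll descriptors into gpollfds and clamp
 * *cur_timeout to GLib's requested timeout, converted to nanoseconds.
 */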
static void glib_pollfds_fill(int64_t *cur_timeout)
{
    GMainContext *context = g_main_context_default();
    int timeout = 0;
    int64_t timeout_ns;
    int n;

    g_main_context_prepare(context, &max_priority);

    glib_pollfds_idx = gpollfds->len;
    n = glib_n_poll_fds;
    do {
        GPollFD *pfds;
        glib_n_poll_fds = n;
        g_array_set_size(gpollfds, glib_pollfds_idx + glib_n_poll_fds);
        pfds = &g_array_index(gpollfds, GPollFD, glib_pollfds_idx);
        n = g_main_context_query(context, max_priority, &timeout, pfds,
                                 glib_n_poll_fds);
    } while (n != glib_n_poll_fds);

    if (timeout < 0) {
        timeout_ns = -1;
    } else {
        timeout_ns = (int64_t)timeout * (int64_t)SCALE_MS;
    }

    *cur_timeout = qemu_soonest_timeout(timeout_ns, *cur_timeout);
}

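/* Dispatch GLib sources whose descriptors, filled in by glib_pollfds_fill(),
 * became ready during the poll.
 */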
static void glib_pollfds_poll(void)
{
    GMainContext *context = g_main_context_default();
    GPollFD *pfds = &g_array_index(gpollfds, GPollFD, glib_pollfds_idx);

    if (g_main_context_check(context, max_priority, pfds, glib_n_poll_fds)) {
        g_main_context_dispatch(context);
    }
}

#define MAX_MAIN_LOOP_SPIN (1000)

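/* POSIX variant: poll QEMU's descriptors together with GLib's, releasing the
 * iothread lock around a blocking wait so VCPU threads can make progress.
 */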
static int os_host_main_loop_wait(int64_t timeout)
{
    int ret;
    static int spin_counter;

    glib_pollfds_fill(&timeout);

    /* If the I/O thread is very busy or we are incorrectly busy waiting in
     * the I/O thread, this can lead to starvation of the BQL such that the
     * VCPU threads never run.  To make sure we can detect the latter case,
     * print a message to the screen.  If we run into this condition, create
     * a fake timeout in order to give the VCPU threads a chance to run.
     */
    if (!timeout && (spin_counter > MAX_MAIN_LOOP_SPIN)) {
        static bool notified;

        if (!notified) {
            fprintf(stderr,
                    "main-loop: WARNING: I/O thread spun for %d iterations\n",
                    MAX_MAIN_LOOP_SPIN);
            notified = true;
        }

        timeout = SCALE_MS;
    }

    if (timeout) {
        spin_counter = 0;
        qemu_mutex_unlock_iothread();
    } else {
        spin_counter++;
    }

    ret = qemu_poll_ns((GPollFD *)gpollfds->data, gpollfds->len, timeout);

    if (timeout) {
        qemu_mutex_lock_iothread();
    }

    glib_pollfds_poll();
    return ret;
}
#else
/***********************************************************/
/* Polling handling */

typedef struct PollingEntry {
    PollingFunc *func;
    void *opaque;
    struct PollingEntry *next;
} PollingEntry;

static PollingEntry *first_polling_entry;

int qemu_add_polling_cb(PollingFunc *func, void *opaque)
{
    PollingEntry **ppe, *pe;
    pe = g_malloc0(sizeof(PollingEntry));
    pe->func = func;
    pe->opaque = opaque;
    for (ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next);
    *ppe = pe;
    return 0;
}

void qemu_del_polling_cb(PollingFunc *func, void *opaque)
{
    PollingEntry **ppe, *pe;
    for (ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next) {
        pe = *ppe;
        if (pe->func == func && pe->opaque == opaque) {
            *ppe = pe->next;
            g_free(pe);
            break;
        }
    }
}

/***********************************************************/
/* Wait objects support */
typedef struct WaitObjects {
    int num;
    int revents[MAXIMUM_WAIT_OBJECTS + 1];
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    WaitObjectFunc *func[MAXIMUM_WAIT_OBJECTS + 1];
    void *opaque[MAXIMUM_WAIT_OBJECTS + 1];
} WaitObjects;

static WaitObjects wait_objects = {0};

int qemu_add_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
{
    WaitObjects *w = &wait_objects;
    if (w->num >= MAXIMUM_WAIT_OBJECTS) {
        return -1;
    }
    w->events[w->num] = handle;
    w->func[w->num] = func;
    w->opaque[w->num] = opaque;
    w->revents[w->num] = 0;
    w->num++;
    return 0;
}

void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
{
    int i, found;
    WaitObjects *w = &wait_objects;

    found = 0;
    for (i = 0; i < w->num; i++) {
        if (w->events[i] == handle) {
            found = 1;
        }
        if (found) {
            w->events[i] = w->events[i + 1];
            w->func[i] = w->func[i + 1];
            w->opaque[i] = w->opaque[i + 1];
            w->revents[i] = w->revents[i + 1];
        }
    }
    if (found) {
        w->num--;
    }
}

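/* Associate a socket with the AioContext's event notifier so that network
 * events on the fd wake up the main loop (Win32 only).
 */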
void qemu_fd_register(int fd)
{
    WSAEventSelect(fd, event_notifier_get_handle(&qemu_aio_context->notifier),
                   FD_READ | FD_ACCEPT | FD_CLOSE |
                   FD_CONNECT | FD_WRITE | FD_OOB);
}

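/* Convert the GPollFD array into select() fd_sets; returns the highest fd
 * number seen, or -1 if the array is empty.
 */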
static int pollfds_fill(GArray *pollfds, fd_set *rfds, fd_set *wfds,
                        fd_set *xfds)
{
    int nfds = -1;
    int i;

    for (i = 0; i < pollfds->len; i++) {
        GPollFD *pfd = &g_array_index(pollfds, GPollFD, i);
        int fd = pfd->fd;
        int events = pfd->events;
        if (events & G_IO_IN) {
            FD_SET(fd, rfds);
            nfds = MAX(nfds, fd);
        }
        if (events & G_IO_OUT) {
            FD_SET(fd, wfds);
            nfds = MAX(nfds, fd);
        }
        if (events & G_IO_PRI) {
            FD_SET(fd, xfds);
            nfds = MAX(nfds, fd);
        }
    }
    return nfds;
}

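/* Write the select() results back into each GPollFD's revents, masked by the
 * events the caller asked for.
 */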
static void pollfds_poll(GArray *pollfds, int nfds, fd_set *rfds,
                         fd_set *wfds, fd_set *xfds)
{
    int i;

    for (i = 0; i < pollfds->len; i++) {
        GPollFD *pfd = &g_array_index(pollfds, GPollFD, i);
        int fd = pfd->fd;
        int revents = 0;

        if (FD_ISSET(fd, rfds)) {
            revents |= G_IO_IN;
        }
        if (FD_ISSET(fd, wfds)) {
            revents |= G_IO_OUT;
        }
        if (FD_ISSET(fd, xfds)) {
            revents |= G_IO_PRI;
        }
        pfd->revents = revents & pfd->events;
    }
}

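/* Win32 variant: run registered polling callbacks, take a zero-timeout
 * select() pass over the socket fd_sets, then poll the GLib descriptors
 * together with the registered wait-object HANDLEs, dropping the iothread
 * lock around the wait.
 */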
static int os_host_main_loop_wait(int64_t timeout)
{
    GMainContext *context = g_main_context_default();
    GPollFD poll_fds[1024 * 2]; /* this is probably overkill */
    int select_ret = 0;
    int g_poll_ret, ret, i, n_poll_fds;
    PollingEntry *pe;
    WaitObjects *w = &wait_objects;
    gint poll_timeout;
    int64_t poll_timeout_ns;
    static struct timeval tv0;
    fd_set rfds, wfds, xfds;
    int nfds;

    /* XXX: need to suppress polling by better using win32 events */
    ret = 0;
    for (pe = first_polling_entry; pe != NULL; pe = pe->next) {
        ret |= pe->func(pe->opaque);
    }
    if (ret != 0) {
        return ret;
    }

    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    FD_ZERO(&xfds);
    nfds = pollfds_fill(gpollfds, &rfds, &wfds, &xfds);
    if (nfds >= 0) {
        select_ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv0);
        if (select_ret != 0) {
            timeout = 0;
        }
        if (select_ret > 0) {
            pollfds_poll(gpollfds, nfds, &rfds, &wfds, &xfds);
        }
    }

    g_main_context_prepare(context, &max_priority);
    n_poll_fds = g_main_context_query(context, max_priority, &poll_timeout,
                                      poll_fds, ARRAY_SIZE(poll_fds));
    g_assert(n_poll_fds <= ARRAY_SIZE(poll_fds));

    for (i = 0; i < w->num; i++) {
        poll_fds[n_poll_fds + i].fd = (DWORD_PTR)w->events[i];
        poll_fds[n_poll_fds + i].events = G_IO_IN;
    }

    if (poll_timeout < 0) {
        poll_timeout_ns = -1;
    } else {
        poll_timeout_ns = (int64_t)poll_timeout * (int64_t)SCALE_MS;
    }

    poll_timeout_ns = qemu_soonest_timeout(poll_timeout_ns, timeout);

    qemu_mutex_unlock_iothread();
    g_poll_ret = qemu_poll_ns(poll_fds, n_poll_fds + w->num, poll_timeout_ns);

    qemu_mutex_lock_iothread();
    if (g_poll_ret > 0) {
        for (i = 0; i < w->num; i++) {
            w->revents[i] = poll_fds[n_poll_fds + i].revents;
        }
        for (i = 0; i < w->num; i++) {
            if (w->revents[i] && w->func[i]) {
                w->func[i](w->opaque[i]);
            }
        }
    }

    if (g_main_context_check(context, max_priority, poll_fds, n_poll_fds)) {
        g_main_context_dispatch(context);
    }

    return select_ret || g_poll_ret;
}
#endif

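/* Run one iteration of the main loop: collect poll descriptors (slirp, the
 * iohandlers, GLib), wait using the nearest timer deadline as the timeout,
 * then dispatch whatever became ready and run any expired timers.
 */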
int main_loop_wait(int nonblocking)
{
    int ret;
    uint32_t timeout = UINT32_MAX;
    int64_t timeout_ns;

    if (nonblocking) {
        timeout = 0;
    }

    /* poll any events */
    g_array_set_size(gpollfds, 0); /* reset for new iteration */
    /* XXX: separate device handlers from system ones */
#ifdef CONFIG_SLIRP
    slirp_pollfds_fill(gpollfds, &timeout);
#endif
    qemu_iohandler_fill(gpollfds);

    if (timeout == UINT32_MAX) {
        timeout_ns = -1;
    } else {
        timeout_ns = (uint64_t)timeout * (int64_t)(SCALE_MS);
    }

    timeout_ns = qemu_soonest_timeout(timeout_ns,
                                      timerlistgroup_deadline_ns(
                                          &main_loop_tlg));

    ret = os_host_main_loop_wait(timeout_ns);
    qemu_iohandler_poll(gpollfds, ret);
#ifdef CONFIG_SLIRP
    slirp_pollfds_poll(gpollfds, (ret < 0));
#endif

    qemu_clock_run_all_timers();

    return ret;
}

/* Functions to operate on the main QEMU AioContext.  */

QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
{
    return aio_bh_new(qemu_aio_context, cb, opaque);
}

bool qemu_aio_wait(void)
{
    return aio_poll(qemu_aio_context, true);
}

#ifdef CONFIG_POSIX
void qemu_aio_set_fd_handler(int fd,
                             IOHandler *io_read,
                             IOHandler *io_write,
                             void *opaque)
{
    aio_set_fd_handler(qemu_aio_context, fd, io_read, io_write, opaque);
}
#endif

void qemu_aio_set_event_notifier(EventNotifier *notifier,
                                 EventNotifierHandler *io_read)
{
    aio_set_event_notifier(qemu_aio_context, notifier, io_read);
}