main-loop.c @ 11c7549d

/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "qemu/timer.h"
#include "slirp/slirp.h"
#include "qemu/main-loop.h"
#include "block/aio.h"

#ifndef _WIN32

#include "qemu/compatfd.h"

/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (intptr_t)opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);

        if (len == -1 && errno == EAGAIN) {
            break;
        }

        if (len != sizeof(info)) {
            printf("read from sigfd returned %zd: %m\n", len);
            return;
        }

        sigaction(info.ssi_signo, NULL, &action);
        if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction) {
            action.sa_sigaction(info.ssi_signo,
                                (siginfo_t *)&info, NULL);
        } else if (action.sa_handler) {
            action.sa_handler(info.ssi_signo);
        }
    }
}

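/* Block the signals handled by the main loop in this thread and route all
 * of them except SIG_IPI through a non-blocking signalfd, which is read by
 * sigfd_handler() above.
 */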
static int qemu_signal_init(void)
{
    int sigfd;
    sigset_t set;

    /*
     * SIG_IPI must be blocked in the main thread and must not be caught
     * by sigwait() in the signal thread. Otherwise, the cpu thread will
     * not catch it reliably.
     */
    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIGBUS);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    sigdelset(&set, SIG_IPI);
    sigfd = qemu_signalfd(&set);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl_setfl(sigfd, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(intptr_t)sigfd);

    return 0;
}

#else /* _WIN32 */

static int qemu_signal_init(void)
{
    return 0;
}
#endif

static AioContext *qemu_aio_context;

AioContext *qemu_get_aio_context(void)
{
    return qemu_aio_context;
}

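/* Wake up the main loop if it is currently blocked waiting for events.  */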
void qemu_notify_event(void)
{
    if (!qemu_aio_context) {
        return;
    }
    aio_notify(qemu_aio_context);
}

static GArray *gpollfds;

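/* Set up clocks, the alarm timer and signal handling, then create the
 * global poll descriptor array and the main AioContext, whose GSource is
 * attached to the default GLib main context.
 */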
int qemu_init_main_loop(void)
{
    int ret;
    GSource *src;

    init_clocks();
    if (init_timer_alarm() < 0) {
        fprintf(stderr, "could not initialize alarm timer\n");
        exit(1);
    }

    ret = qemu_signal_init();
    if (ret) {
        return ret;
    }

    gpollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
    qemu_aio_context = aio_context_new();
    src = aio_get_g_source(qemu_aio_context);
    g_source_attach(src, NULL);
    g_source_unref(src);
    return 0;
}

static int max_priority;

#ifndef _WIN32
static int glib_pollfds_idx;
static int glib_n_poll_fds;

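/* Append the default GLib context's poll descriptors to gpollfds and
 * lower *cur_timeout if GLib needs to be woken up sooner.
 */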
static void glib_pollfds_fill(uint32_t *cur_timeout)
{
    GMainContext *context = g_main_context_default();
    int timeout = 0;
    int n;

    g_main_context_prepare(context, &max_priority);

    glib_pollfds_idx = gpollfds->len;
    n = glib_n_poll_fds;
    do {
        GPollFD *pfds;
        glib_n_poll_fds = n;
        g_array_set_size(gpollfds, glib_pollfds_idx + glib_n_poll_fds);
        pfds = &g_array_index(gpollfds, GPollFD, glib_pollfds_idx);
        n = g_main_context_query(context, max_priority, &timeout, pfds,
                                 glib_n_poll_fds);
    } while (n != glib_n_poll_fds);

    if (timeout >= 0 && timeout < *cur_timeout) {
        *cur_timeout = timeout;
    }
}

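/* Check the descriptors filled in by glib_pollfds_fill() and dispatch any
 * GLib sources that have become ready.
 */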
static void glib_pollfds_poll(void)
{
    GMainContext *context = g_main_context_default();
    GPollFD *pfds = &g_array_index(gpollfds, GPollFD, glib_pollfds_idx);

    if (g_main_context_check(context, max_priority, pfds, glib_n_poll_fds)) {
        g_main_context_dispatch(context);
    }
}

#define MAX_MAIN_LOOP_SPIN (1000)

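/* Wait for events on every registered descriptor, releasing the iothread
 * lock around g_poll() whenever the timeout is non-zero.
 */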
static int os_host_main_loop_wait(uint32_t timeout)
{
    int ret;
    static int spin_counter;

    glib_pollfds_fill(&timeout);

    /* If the I/O thread is very busy or we are incorrectly busy waiting in
     * the I/O thread, this can lead to starvation of the BQL such that the
     * VCPU threads never run.  To make sure we can detect the latter case,
     * print a message to the screen.  If we run into this condition, create
     * a fake timeout in order to give the VCPU threads a chance to run.
     */
    if (spin_counter > MAX_MAIN_LOOP_SPIN) {
        static bool notified;

        if (!notified) {
            fprintf(stderr,
                    "main-loop: WARNING: I/O thread spun for %d iterations\n",
                    MAX_MAIN_LOOP_SPIN);
            notified = true;
        }

        timeout = 1;
    }

    if (timeout > 0) {
        spin_counter = 0;
        qemu_mutex_unlock_iothread();
    } else {
        spin_counter++;
    }

    ret = g_poll((GPollFD *)gpollfds->data, gpollfds->len, timeout);

    if (timeout > 0) {
        qemu_mutex_lock_iothread();
    }

    glib_pollfds_poll();
    return ret;
}
#else
/***********************************************************/
/* Polling handling */

typedef struct PollingEntry {
    PollingFunc *func;
    void *opaque;
    struct PollingEntry *next;
} PollingEntry;

static PollingEntry *first_polling_entry;

int qemu_add_polling_cb(PollingFunc *func, void *opaque)
{
    PollingEntry **ppe, *pe;
    pe = g_malloc0(sizeof(PollingEntry));
    pe->func = func;
    pe->opaque = opaque;
    for (ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next);
    *ppe = pe;
    return 0;
}

void qemu_del_polling_cb(PollingFunc *func, void *opaque)
{
    PollingEntry **ppe, *pe;
    for (ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next) {
        pe = *ppe;
        if (pe->func == func && pe->opaque == opaque) {
            *ppe = pe->next;
            g_free(pe);
            break;
        }
    }
}

/***********************************************************/
/* Wait objects support */
typedef struct WaitObjects {
    int num;
    int revents[MAXIMUM_WAIT_OBJECTS + 1];
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    WaitObjectFunc *func[MAXIMUM_WAIT_OBJECTS + 1];
    void *opaque[MAXIMUM_WAIT_OBJECTS + 1];
} WaitObjects;

static WaitObjects wait_objects = {0};

int qemu_add_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
{
    WaitObjects *w = &wait_objects;
    if (w->num >= MAXIMUM_WAIT_OBJECTS) {
        return -1;
    }
    w->events[w->num] = handle;
    w->func[w->num] = func;
    w->opaque[w->num] = opaque;
    w->revents[w->num] = 0;
    w->num++;
    return 0;
}

void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
{
    int i, found;
    WaitObjects *w = &wait_objects;

    found = 0;
    for (i = 0; i < w->num; i++) {
        if (w->events[i] == handle) {
            found = 1;
        }
        if (found) {
            w->events[i] = w->events[i + 1];
            w->func[i] = w->func[i + 1];
            w->opaque[i] = w->opaque[i + 1];
            w->revents[i] = w->revents[i + 1];
        }
    }
    if (found) {
        w->num--;
    }
}

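/* Associate a socket with the main AioContext's event notifier so that
 * socket activity wakes up the main loop's wait.
 */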
void qemu_fd_register(int fd)
{
    WSAEventSelect(fd, event_notifier_get_handle(&qemu_aio_context->notifier),
                   FD_READ | FD_ACCEPT | FD_CLOSE |
                   FD_CONNECT | FD_WRITE | FD_OOB);
}

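/* Translate the GPollFD array into select()-style fd_sets; returns the
 * highest descriptor added, or -1 if no descriptor requested any event.
 */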
static int pollfds_fill(GArray *pollfds, fd_set *rfds, fd_set *wfds,
                        fd_set *xfds)
{
    int nfds = -1;
    int i;

    for (i = 0; i < pollfds->len; i++) {
        GPollFD *pfd = &g_array_index(pollfds, GPollFD, i);
        int fd = pfd->fd;
        int events = pfd->events;
        if (events & G_IO_IN) {
            FD_SET(fd, rfds);
            nfds = MAX(nfds, fd);
        }
        if (events & G_IO_OUT) {
            FD_SET(fd, wfds);
            nfds = MAX(nfds, fd);
        }
        if (events & G_IO_PRI) {
            FD_SET(fd, xfds);
            nfds = MAX(nfds, fd);
        }
    }
    return nfds;
}

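/* Fill in each GPollFD's revents from the fd_sets returned by select(),
 * masked by the events that were originally requested.
 */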
static void pollfds_poll(GArray *pollfds, int nfds, fd_set *rfds,
                         fd_set *wfds, fd_set *xfds)
{
    int i;

    for (i = 0; i < pollfds->len; i++) {
        GPollFD *pfd = &g_array_index(pollfds, GPollFD, i);
        int fd = pfd->fd;
        int revents = 0;

        if (FD_ISSET(fd, rfds)) {
            revents |= G_IO_IN;
        }
        if (FD_ISSET(fd, wfds)) {
            revents |= G_IO_OUT;
        }
        if (FD_ISSET(fd, xfds)) {
            revents |= G_IO_PRI;
        }
        pfd->revents = revents & pfd->events;
    }
}

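/* Win32 variant: run the registered polling callbacks, poll sockets with
 * select(), then wait with g_poll() on the GLib descriptors and the
 * registered wait object handles, dispatching whatever became ready.
 */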
static int os_host_main_loop_wait(uint32_t timeout)
{
    GMainContext *context = g_main_context_default();
    GPollFD poll_fds[1024 * 2]; /* this is probably overkill */
    int select_ret = 0;
    int g_poll_ret, ret, i, n_poll_fds;
    PollingEntry *pe;
    WaitObjects *w = &wait_objects;
    gint poll_timeout;
    static struct timeval tv0;
    fd_set rfds, wfds, xfds;
    int nfds;

    /* XXX: need to suppress polling by better using win32 events */
    ret = 0;
    for (pe = first_polling_entry; pe != NULL; pe = pe->next) {
        ret |= pe->func(pe->opaque);
    }
    if (ret != 0) {
        return ret;
    }

    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    FD_ZERO(&xfds);
    nfds = pollfds_fill(gpollfds, &rfds, &wfds, &xfds);
    if (nfds >= 0) {
        select_ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv0);
        if (select_ret != 0) {
            timeout = 0;
        }
        if (select_ret > 0) {
            pollfds_poll(gpollfds, nfds, &rfds, &wfds, &xfds);
        }
    }

    g_main_context_prepare(context, &max_priority);
    n_poll_fds = g_main_context_query(context, max_priority, &poll_timeout,
                                      poll_fds, ARRAY_SIZE(poll_fds));
    g_assert(n_poll_fds <= ARRAY_SIZE(poll_fds));

    for (i = 0; i < w->num; i++) {
        poll_fds[n_poll_fds + i].fd = (DWORD_PTR)w->events[i];
        poll_fds[n_poll_fds + i].events = G_IO_IN;
    }

    if (poll_timeout < 0 || timeout < poll_timeout) {
        poll_timeout = timeout;
    }

    qemu_mutex_unlock_iothread();
    g_poll_ret = g_poll(poll_fds, n_poll_fds + w->num, poll_timeout);
    qemu_mutex_lock_iothread();
    if (g_poll_ret > 0) {
        for (i = 0; i < w->num; i++) {
            w->revents[i] = poll_fds[n_poll_fds + i].revents;
        }
        for (i = 0; i < w->num; i++) {
            if (w->revents[i] && w->func[i]) {
                w->func[i](w->opaque[i]);
            }
        }
    }

    if (g_main_context_check(context, max_priority, poll_fds, n_poll_fds)) {
        g_main_context_dispatch(context);
    }

    return select_ret || g_poll_ret;
}
#endif

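/* Run a single iteration of the main loop: gather poll descriptors from
 * slirp (when configured) and the registered I/O handlers, wait in
 * os_host_main_loop_wait(), dispatch the handlers that fired, then run any
 * expired timers.
 */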
int main_loop_wait(int nonblocking)
{
    int ret;
    uint32_t timeout = UINT32_MAX;

    if (nonblocking) {
        timeout = 0;
    }

    /* poll any events */
    g_array_set_size(gpollfds, 0); /* reset for new iteration */
    /* XXX: separate device handlers from system ones */
#ifdef CONFIG_SLIRP
    slirp_update_timeout(&timeout);
    slirp_pollfds_fill(gpollfds);
#endif
    qemu_iohandler_fill(gpollfds);
    ret = os_host_main_loop_wait(timeout);
    qemu_iohandler_poll(gpollfds, ret);
#ifdef CONFIG_SLIRP
    slirp_pollfds_poll(gpollfds, (ret < 0));
#endif

    qemu_run_all_timers();

    return ret;
}

/* Functions to operate on the main QEMU AioContext.  */

QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
{
    return aio_bh_new(qemu_aio_context, cb, opaque);
}

bool qemu_aio_wait(void)
{
    return aio_poll(qemu_aio_context, true);
}

#ifdef CONFIG_POSIX
void qemu_aio_set_fd_handler(int fd,
                             IOHandler *io_read,
                             IOHandler *io_write,
                             AioFlushHandler *io_flush,
                             void *opaque)
{
    aio_set_fd_handler(qemu_aio_context, fd, io_read, io_write, io_flush,
                       opaque);
}
#endif

void qemu_aio_set_event_notifier(EventNotifier *notifier,
                                 EventNotifierHandler *io_read,
                                 AioFlushEventNotifierHandler *io_flush)
{
    aio_set_event_notifier(qemu_aio_context, notifier, io_read, io_flush);
}