Revision f2e5dca4 — aio: drop the io_flush argument. Removes the AioFlushHandler / AioFlushEventNotifierHandler parameters from aio_set_fd_handler(), aio_set_event_notifier() and their qemu_aio_* wrappers, and updates all callers (aio-posix.c, aio-win32.c, async.c, block/{curl,gluster,iscsi,linux-aio,nbd,rbd,sheepdog,ssh}.c, hw/block/dataplane/virtio-blk.c, include/block/aio.h, main-loop.c, tests/test-aio.c, thread-pool.c) to match.

b/aio-posix.c
46 46
                        int fd,
47 47
                        IOHandler *io_read,
48 48
                        IOHandler *io_write,
49
                        AioFlushHandler *io_flush,
50 49
                        void *opaque)
51 50
{
52 51
    AioHandler *node;
......
95 94

  
96 95
void aio_set_event_notifier(AioContext *ctx,
97 96
                            EventNotifier *notifier,
98
                            EventNotifierHandler *io_read,
99
                            AioFlushEventNotifierHandler *io_flush)
97
                            EventNotifierHandler *io_read)
100 98
{
101 99
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
102
                       (IOHandler *)io_read, NULL,
103
                       (AioFlushHandler *)io_flush, notifier);
100
                       (IOHandler *)io_read, NULL, notifier);
104 101
}
105 102

  
106 103
bool aio_pending(AioContext *ctx)
b/aio-win32.c
30 30

  
31 31
void aio_set_event_notifier(AioContext *ctx,
32 32
                            EventNotifier *e,
33
                            EventNotifierHandler *io_notify,
34
                            AioFlushEventNotifierHandler *io_flush)
33
                            EventNotifierHandler *io_notify)
35 34
{
36 35
    AioHandler *node;
37 36

  
b/async.c
201 201
    AioContext *ctx = (AioContext *) source;
202 202

  
203 203
    thread_pool_free(ctx->thread_pool);
204
    aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
204
    aio_set_event_notifier(ctx, &ctx->notifier, NULL);
205 205
    event_notifier_cleanup(&ctx->notifier);
206 206
    qemu_mutex_destroy(&ctx->bh_lock);
207 207
    g_array_free(ctx->pollfds, TRUE);
......
243 243
    event_notifier_init(&ctx->notifier, false);
244 244
    aio_set_event_notifier(ctx, &ctx->notifier, 
245 245
                           (EventNotifierHandler *)
246
                           event_notifier_test_and_clear, NULL);
246
                           event_notifier_test_and_clear);
247 247

  
248 248
    return ctx;
249 249
}
b/block/curl.c
93 93
    DPRINTF("CURL (AIO): Sock action %d on fd %d\n", action, fd);
94 94
    switch (action) {
95 95
        case CURL_POLL_IN:
96
            qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, NULL, s);
96
            qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, s);
97 97
            break;
98 98
        case CURL_POLL_OUT:
99
            qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, NULL, s);
99
            qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, s);
100 100
            break;
101 101
        case CURL_POLL_INOUT:
102
            qemu_aio_set_fd_handler(fd, curl_multi_do, curl_multi_do,
103
                                    NULL, s);
102
            qemu_aio_set_fd_handler(fd, curl_multi_do, curl_multi_do, s);
104 103
            break;
105 104
        case CURL_POLL_REMOVE:
106
            qemu_aio_set_fd_handler(fd, NULL, NULL, NULL, NULL);
105
            qemu_aio_set_fd_handler(fd, NULL, NULL, NULL);
107 106
            break;
108 107
    }
109 108

  
b/block/gluster.c
339 339
    }
340 340
    fcntl(s->fds[GLUSTER_FD_READ], F_SETFL, O_NONBLOCK);
341 341
    qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ],
342
        qemu_gluster_aio_event_reader, NULL, NULL, s);
342
        qemu_gluster_aio_event_reader, NULL, s);
343 343

  
344 344
out:
345 345
    qemu_opts_del(opts);
......
438 438
        qemu_aio_release(acb);
439 439
        close(s->fds[GLUSTER_FD_READ]);
440 440
        close(s->fds[GLUSTER_FD_WRITE]);
441
        qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL,
442
            NULL);
441
        qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL);
443 442
        bs->drv = NULL; /* Make the disk inaccessible */
444 443
        qemu_mutex_unlock_iothread();
445 444
    }
......
595 594

  
596 595
    close(s->fds[GLUSTER_FD_READ]);
597 596
    close(s->fds[GLUSTER_FD_WRITE]);
598
    qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL, NULL);
597
    qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL);
599 598

  
600 599
    if (s->fd) {
601 600
        glfs_close(s->fd);
b/block/iscsi.c
159 159
        qemu_aio_set_fd_handler(iscsi_get_fd(iscsi),
160 160
                      iscsi_process_read,
161 161
                      (ev & POLLOUT) ? iscsi_process_write : NULL,
162
                      NULL,
163 162
                      iscsilun);
164 163

  
165 164
    }
......
1208 1207
        qemu_del_timer(iscsilun->nop_timer);
1209 1208
        qemu_free_timer(iscsilun->nop_timer);
1210 1209
    }
1211
    qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL, NULL);
1210
    qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL);
1212 1211
    iscsi_destroy_context(iscsi);
1213 1212
    memset(iscsilun, 0, sizeof(IscsiLun));
1214 1213
}
b/block/linux-aio.c
190 190
        goto out_close_efd;
191 191
    }
192 192

  
193
    qemu_aio_set_event_notifier(&s->e, qemu_laio_completion_cb,
194
                                NULL);
193
    qemu_aio_set_event_notifier(&s->e, qemu_laio_completion_cb);
195 194

  
196 195
    return s;
197 196

  
b/block/nbd.c
334 334

  
335 335
    qemu_co_mutex_lock(&s->send_mutex);
336 336
    s->send_coroutine = qemu_coroutine_self();
337
    qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write,
338
                            NULL, s);
337
    qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write, s);
339 338
    if (qiov) {
340 339
        if (!s->is_unix) {
341 340
            socket_set_cork(s->sock, 1);
......
354 353
    } else {
355 354
        rc = nbd_send_request(s->sock, request);
356 355
    }
357
    qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL,
358
                            NULL, s);
356
    qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL, s);
359 357
    s->send_coroutine = NULL;
360 358
    qemu_co_mutex_unlock(&s->send_mutex);
361 359
    return rc;
......
431 429
    /* Now that we're connected, set the socket to be non-blocking and
432 430
     * kick the reply mechanism.  */
433 431
    qemu_set_nonblock(sock);
434
    qemu_aio_set_fd_handler(sock, nbd_reply_ready, NULL,
435
                            NULL, s);
432
    qemu_aio_set_fd_handler(sock, nbd_reply_ready, NULL, s);
436 433

  
437 434
    s->sock = sock;
438 435
    s->size = size;
......
452 449
    request.len = 0;
453 450
    nbd_send_request(s->sock, &request);
454 451

  
455
    qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL);
452
    qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL);
456 453
    closesocket(s->sock);
457 454
}
458 455

  
b/block/rbd.c
545 545
    fcntl(s->fds[0], F_SETFL, O_NONBLOCK);
546 546
    fcntl(s->fds[1], F_SETFL, O_NONBLOCK);
547 547
    qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], qemu_rbd_aio_event_reader,
548
                            NULL, NULL, s);
548
                            NULL, s);
549 549

  
550 550

  
551 551
    qemu_opts_del(opts);
......
569 569

  
570 570
    close(s->fds[0]);
571 571
    close(s->fds[1]);
572
    qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL, NULL);
572
    qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL);
573 573

  
574 574
    rbd_close(s->image);
575 575
    rados_ioctx_destroy(s->io_ctx);
b/block/sheepdog.c
531 531
    unsigned int *rlen = srco->rlen;
532 532

  
533 533
    co = qemu_coroutine_self();
534
    qemu_aio_set_fd_handler(sockfd, NULL, restart_co_req, NULL, co);
534
    qemu_aio_set_fd_handler(sockfd, NULL, restart_co_req, co);
535 535

  
536 536
    ret = send_co_req(sockfd, hdr, data, wlen);
537 537
    if (ret < 0) {
538 538
        goto out;
539 539
    }
540 540

  
541
    qemu_aio_set_fd_handler(sockfd, restart_co_req, NULL, NULL, co);
541
    qemu_aio_set_fd_handler(sockfd, restart_co_req, NULL, co);
542 542

  
543 543
    ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr));
544 544
    if (ret < sizeof(*hdr)) {
......
563 563
out:
564 564
    /* there is at most one request for this sockfd, so it is safe to
565 565
     * set each handler to NULL. */
566
    qemu_aio_set_fd_handler(sockfd, NULL, NULL, NULL, NULL);
566
    qemu_aio_set_fd_handler(sockfd, NULL, NULL, NULL);
567 567

  
568 568
    srco->ret = ret;
569 569
    srco->finished = true;
......
804 804
        return fd;
805 805
    }
806 806

  
807
    qemu_aio_set_fd_handler(fd, co_read_response, NULL, NULL, s);
807
    qemu_aio_set_fd_handler(fd, co_read_response, NULL, s);
808 808
    return fd;
809 809
}
810 810

  
......
1054 1054

  
1055 1055
    qemu_co_mutex_lock(&s->lock);
1056 1056
    s->co_send = qemu_coroutine_self();
1057
    qemu_aio_set_fd_handler(s->fd, co_read_response, co_write_request,
1058
                            NULL, s);
1057
    qemu_aio_set_fd_handler(s->fd, co_read_response, co_write_request, s);
1059 1058
    socket_set_cork(s->fd, 1);
1060 1059

  
1061 1060
    /* send a header */
......
1076 1075
    }
1077 1076

  
1078 1077
    socket_set_cork(s->fd, 0);
1079
    qemu_aio_set_fd_handler(s->fd, co_read_response, NULL,
1080
                            NULL, s);
1078
    qemu_aio_set_fd_handler(s->fd, co_read_response, NULL, s);
1081 1079
    qemu_co_mutex_unlock(&s->lock);
1082 1080

  
1083 1081
    return 0;
......
1335 1333
    g_free(buf);
1336 1334
    return 0;
1337 1335
out:
1338
    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL);
1336
    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL);
1339 1337
    if (s->fd >= 0) {
1340 1338
        closesocket(s->fd);
1341 1339
    }
......
1563 1561
        error_report("%s, %s", sd_strerror(rsp->result), s->name);
1564 1562
    }
1565 1563

  
1566
    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL);
1564
    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL);
1567 1565
    closesocket(s->fd);
1568 1566
    g_free(s->host_spec);
1569 1567
}
b/block/ssh.c
758 758
    DPRINTF("s->sock=%d rd_handler=%p wr_handler=%p", s->sock,
759 759
            rd_handler, wr_handler);
760 760

  
761
    qemu_aio_set_fd_handler(s->sock, rd_handler, wr_handler, NULL, co);
761
    qemu_aio_set_fd_handler(s->sock, rd_handler, wr_handler, co);
762 762
}
763 763

  
764 764
static coroutine_fn void clear_fd_handler(BDRVSSHState *s)
765 765
{
766 766
    DPRINTF("s->sock=%d", s->sock);
767
    qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL);
767
    qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL);
768 768
}
769 769

  
770 770
/* A non-blocking call returned EAGAIN, so yield, ensuring the
b/hw/block/dataplane/virtio-blk.c
472 472
        exit(1);
473 473
    }
474 474
    s->host_notifier = *virtio_queue_get_host_notifier(vq);
475
    aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify, NULL);
475
    aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify);
476 476

  
477 477
    /* Set up ioqueue */
478 478
    ioq_init(&s->ioqueue, s->fd, REQ_MAX);
......
480 480
        ioq_put_iocb(&s->ioqueue, &s->requests[i].iocb);
481 481
    }
482 482
    s->io_notifier = *ioq_get_notifier(&s->ioqueue);
483
    aio_set_event_notifier(s->ctx, &s->io_notifier, handle_io, NULL);
483
    aio_set_event_notifier(s->ctx, &s->io_notifier, handle_io);
484 484

  
485 485
    s->started = true;
486 486
    trace_virtio_blk_data_plane_start(s);
......
512 512
        qemu_thread_join(&s->thread);
513 513
    }
514 514

  
515
    aio_set_event_notifier(s->ctx, &s->io_notifier, NULL, NULL);
515
    aio_set_event_notifier(s->ctx, &s->io_notifier, NULL);
516 516
    ioq_cleanup(&s->ioqueue);
517 517

  
518
    aio_set_event_notifier(s->ctx, &s->host_notifier, NULL, NULL);
518
    aio_set_event_notifier(s->ctx, &s->host_notifier, NULL);
519 519
    k->set_host_notifier(qbus->parent, 0, false);
520 520

  
521 521
    aio_context_unref(s->ctx);
b/include/block/aio.h
74 74
    struct ThreadPool *thread_pool;
75 75
} AioContext;
76 76

  
77
/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
78
typedef int (AioFlushEventNotifierHandler)(EventNotifier *e);
79

  
80 77
/**
81 78
 * aio_context_new: Allocate a new AioContext.
82 79
 *
......
198 195
bool aio_poll(AioContext *ctx, bool blocking);
199 196

  
200 197
#ifdef CONFIG_POSIX
201
/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
202
typedef int (AioFlushHandler)(void *opaque);
203

  
204 198
/* Register a file descriptor and associated callbacks.  Behaves very similarly
205 199
 * to qemu_set_fd_handler2.  Unlike qemu_set_fd_handler2, these callbacks will
206 200
 * be invoked when using qemu_aio_wait().
......
212 206
                        int fd,
213 207
                        IOHandler *io_read,
214 208
                        IOHandler *io_write,
215
                        AioFlushHandler *io_flush,
216 209
                        void *opaque);
217 210
#endif
218 211

  
......
225 218
 */
226 219
void aio_set_event_notifier(AioContext *ctx,
227 220
                            EventNotifier *notifier,
228
                            EventNotifierHandler *io_read,
229
                            AioFlushEventNotifierHandler *io_flush);
221
                            EventNotifierHandler *io_read);
230 222

  
231 223
/* Return a GSource that lets the main loop poll the file descriptors attached
232 224
 * to this AioContext.
......
240 232

  
241 233
bool qemu_aio_wait(void);
242 234
void qemu_aio_set_event_notifier(EventNotifier *notifier,
243
                                 EventNotifierHandler *io_read,
244
                                 AioFlushEventNotifierHandler *io_flush);
235
                                 EventNotifierHandler *io_read);
245 236

  
246 237
#ifdef CONFIG_POSIX
247 238
void qemu_aio_set_fd_handler(int fd,
248 239
                             IOHandler *io_read,
249 240
                             IOHandler *io_write,
250
                             AioFlushHandler *io_flush,
251 241
                             void *opaque);
252 242
#endif
253 243

  
b/main-loop.c
489 489
void qemu_aio_set_fd_handler(int fd,
490 490
                             IOHandler *io_read,
491 491
                             IOHandler *io_write,
492
                             AioFlushHandler *io_flush,
493 492
                             void *opaque)
494 493
{
495
    aio_set_fd_handler(qemu_aio_context, fd, io_read, io_write, io_flush,
496
                       opaque);
494
    aio_set_fd_handler(qemu_aio_context, fd, io_read, io_write, opaque);
497 495
}
498 496
#endif
499 497

  
500 498
void qemu_aio_set_event_notifier(EventNotifier *notifier,
501
                                 EventNotifierHandler *io_read,
502
                                 AioFlushEventNotifierHandler *io_flush)
499
                                 EventNotifierHandler *io_read)
503 500
{
504
    aio_set_event_notifier(qemu_aio_context, notifier, io_read, io_flush);
501
    aio_set_event_notifier(qemu_aio_context, notifier, io_read);
505 502
}
b/tests/test-aio.c
233 233
{
234 234
    EventNotifierTestData data = { .n = 0, .active = 0 };
235 235
    event_notifier_init(&data.e, false);
236
    aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL);
236
    aio_set_event_notifier(ctx, &data.e, event_ready_cb);
237 237
    g_assert(!aio_poll(ctx, false));
238 238
    g_assert_cmpint(data.n, ==, 0);
239 239

  
240
    aio_set_event_notifier(ctx, &data.e, NULL, NULL);
240
    aio_set_event_notifier(ctx, &data.e, NULL);
241 241
    g_assert(!aio_poll(ctx, false));
242 242
    g_assert_cmpint(data.n, ==, 0);
243 243
    event_notifier_cleanup(&data.e);
......
247 247
{
248 248
    EventNotifierTestData data = { .n = 0, .active = 1 };
249 249
    event_notifier_init(&data.e, false);
250
    aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL);
250
    aio_set_event_notifier(ctx, &data.e, event_ready_cb);
251 251
    g_assert(!aio_poll(ctx, false));
252 252
    g_assert_cmpint(data.n, ==, 0);
253 253
    g_assert_cmpint(data.active, ==, 1);
......
261 261
    g_assert_cmpint(data.n, ==, 1);
262 262
    g_assert_cmpint(data.active, ==, 0);
263 263

  
264
    aio_set_event_notifier(ctx, &data.e, NULL, NULL);
264
    aio_set_event_notifier(ctx, &data.e, NULL);
265 265
    g_assert(!aio_poll(ctx, false));
266 266
    g_assert_cmpint(data.n, ==, 1);
267 267

  
......
272 272
{
273 273
    EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
274 274
    event_notifier_init(&data.e, false);
275
    aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL);
275
    aio_set_event_notifier(ctx, &data.e, event_ready_cb);
276 276
    g_assert(!aio_poll(ctx, false));
277 277
    g_assert_cmpint(data.n, ==, 0);
278 278
    g_assert_cmpint(data.active, ==, 10);
......
288 288
    g_assert_cmpint(data.active, ==, 0);
289 289
    g_assert(!aio_poll(ctx, false));
290 290

  
291
    aio_set_event_notifier(ctx, &data.e, NULL, NULL);
291
    aio_set_event_notifier(ctx, &data.e, NULL);
292 292
    g_assert(!aio_poll(ctx, false));
293 293
    event_notifier_cleanup(&data.e);
294 294
}
......
299 299
    EventNotifierTestData dummy = { .n = 0, .active = 1 };
300 300

  
301 301
    event_notifier_init(&data.e, false);
302
    aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL);
302
    aio_set_event_notifier(ctx, &data.e, event_ready_cb);
303 303

  
304 304
    g_assert(!aio_poll(ctx, false));
305 305
    g_assert_cmpint(data.n, ==, 0);
......
312 312

  
313 313
    /* An active event notifier forces aio_poll to look at EventNotifiers.  */
314 314
    event_notifier_init(&dummy.e, false);
315
    aio_set_event_notifier(ctx, &dummy.e, event_ready_cb, NULL);
315
    aio_set_event_notifier(ctx, &dummy.e, event_ready_cb);
316 316

  
317 317
    event_notifier_set(&data.e);
318 318
    g_assert(aio_poll(ctx, false));
......
332 332
    g_assert_cmpint(dummy.n, ==, 1);
333 333
    g_assert_cmpint(dummy.active, ==, 0);
334 334

  
335
    aio_set_event_notifier(ctx, &dummy.e, NULL, NULL);
335
    aio_set_event_notifier(ctx, &dummy.e, NULL);
336 336
    event_notifier_cleanup(&dummy.e);
337 337

  
338
    aio_set_event_notifier(ctx, &data.e, NULL, NULL);
338
    aio_set_event_notifier(ctx, &data.e, NULL);
339 339
    g_assert(!aio_poll(ctx, false));
340 340
    g_assert_cmpint(data.n, ==, 2);
341 341

  
......
515 515
{
516 516
    EventNotifierTestData data = { .n = 0, .active = 0 };
517 517
    event_notifier_init(&data.e, false);
518
    aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL);
518
    aio_set_event_notifier(ctx, &data.e, event_ready_cb);
519 519
    while (g_main_context_iteration(NULL, false));
520 520
    g_assert_cmpint(data.n, ==, 0);
521 521

  
522
    aio_set_event_notifier(ctx, &data.e, NULL, NULL);
522
    aio_set_event_notifier(ctx, &data.e, NULL);
523 523
    while (g_main_context_iteration(NULL, false));
524 524
    g_assert_cmpint(data.n, ==, 0);
525 525
    event_notifier_cleanup(&data.e);
......
529 529
{
530 530
    EventNotifierTestData data = { .n = 0, .active = 1 };
531 531
    event_notifier_init(&data.e, false);
532
    aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL);
532
    aio_set_event_notifier(ctx, &data.e, event_ready_cb);
533 533
    g_assert(g_main_context_iteration(NULL, false));
534 534
    g_assert_cmpint(data.n, ==, 0);
535 535
    g_assert_cmpint(data.active, ==, 1);
......
543 543
    g_assert_cmpint(data.n, ==, 1);
544 544
    g_assert_cmpint(data.active, ==, 0);
545 545

  
546
    aio_set_event_notifier(ctx, &data.e, NULL, NULL);
546
    aio_set_event_notifier(ctx, &data.e, NULL);
547 547
    while (g_main_context_iteration(NULL, false));
548 548
    g_assert_cmpint(data.n, ==, 1);
549 549

  
......
554 554
{
555 555
    EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
556 556
    event_notifier_init(&data.e, false);
557
    aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL);
557
    aio_set_event_notifier(ctx, &data.e, event_ready_cb);
558 558
    g_assert(g_main_context_iteration(NULL, false));
559 559
    g_assert_cmpint(data.n, ==, 0);
560 560
    g_assert_cmpint(data.active, ==, 10);
......
570 570
    g_assert_cmpint(data.active, ==, 0);
571 571
    g_assert(!g_main_context_iteration(NULL, false));
572 572

  
573
    aio_set_event_notifier(ctx, &data.e, NULL, NULL);
573
    aio_set_event_notifier(ctx, &data.e, NULL);
574 574
    while (g_main_context_iteration(NULL, false));
575 575
    event_notifier_cleanup(&data.e);
576 576
}
......
581 581
    EventNotifierTestData dummy = { .n = 0, .active = 1 };
582 582

  
583 583
    event_notifier_init(&data.e, false);
584
    aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL);
584
    aio_set_event_notifier(ctx, &data.e, event_ready_cb);
585 585

  
586 586
    while (g_main_context_iteration(NULL, false));
587 587
    g_assert_cmpint(data.n, ==, 0);
......
594 594

  
595 595
    /* An active event notifier forces aio_poll to look at EventNotifiers.  */
596 596
    event_notifier_init(&dummy.e, false);
597
    aio_set_event_notifier(ctx, &dummy.e, event_ready_cb, NULL);
597
    aio_set_event_notifier(ctx, &dummy.e, event_ready_cb);
598 598

  
599 599
    event_notifier_set(&data.e);
600 600
    g_assert(g_main_context_iteration(NULL, false));
......
614 614
    g_assert_cmpint(dummy.n, ==, 1);
615 615
    g_assert_cmpint(dummy.active, ==, 0);
616 616

  
617
    aio_set_event_notifier(ctx, &dummy.e, NULL, NULL);
617
    aio_set_event_notifier(ctx, &dummy.e, NULL);
618 618
    event_notifier_cleanup(&dummy.e);
619 619

  
620
    aio_set_event_notifier(ctx, &data.e, NULL, NULL);
620
    aio_set_event_notifier(ctx, &data.e, NULL);
621 621
    while (g_main_context_iteration(NULL, false));
622 622
    g_assert_cmpint(data.n, ==, 2);
623 623

  
b/thread-pool.c
303 303
    QLIST_INIT(&pool->head);
304 304
    QTAILQ_INIT(&pool->request_list);
305 305

  
306
    aio_set_event_notifier(ctx, &pool->notifier, event_notifier_ready,
307
                           NULL);
306
    aio_set_event_notifier(ctx, &pool->notifier, event_notifier_ready);
308 307
}
309 308

  
310 309
ThreadPool *thread_pool_new(AioContext *ctx)
......
338 337

  
339 338
    qemu_mutex_unlock(&pool->lock);
340 339

  
341
    aio_set_event_notifier(pool->ctx, &pool->notifier, NULL, NULL);
340
    aio_set_event_notifier(pool->ctx, &pool->notifier, NULL);
342 341
    qemu_sem_destroy(&pool->sem);
343 342
    qemu_cond_destroy(&pool->check_cancel);
344 343
    qemu_cond_destroy(&pool->worker_stopped);

Also available in: Unified diff