Revision c16b5a2c

b/block.c
 #define SECTOR_BITS 9
 #define SECTOR_SIZE (1 << SECTOR_BITS)
 
-typedef struct BlockDriverAIOCBSync {
-    BlockDriverAIOCB common;
-    QEMUBH *bh;
-    int ret;
-    /* vector translation state */
-    QEMUIOVector *qiov;
-    uint8_t *bounce;
-    int is_write;
-} BlockDriverAIOCBSync;
-
 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
         BlockDriverCompletionFunc *cb, void *opaque);
 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
         BlockDriverCompletionFunc *cb, void *opaque);
-static void bdrv_aio_cancel_em(BlockDriverAIOCB *acb);
 static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
                         uint8_t *buf, int nb_sectors);
 static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,
......
     }
 }
 
-
 void bdrv_register(BlockDriver *bdrv)
 {
     if (!bdrv->bdrv_aio_readv) {
         /* add AIO emulation layer */
         bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
         bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
-        bdrv->bdrv_aio_cancel = bdrv_aio_cancel_em;
-        bdrv->aiocb_size = sizeof(BlockDriverAIOCBSync);
     } else if (!bdrv->bdrv_read) {
         /* add synchronous IO emulation layer */
         bdrv->bdrv_read = bdrv_read_em;
         bdrv->bdrv_write = bdrv_write_em;
     }
-    aio_pool_init(&bdrv->aio_pool, bdrv->aiocb_size, bdrv->bdrv_aio_cancel);
     bdrv->next = first_drv;
     first_drv = bdrv;
 }
......
 /**************************************************************/
 /* async block device emulation */
 
+typedef struct BlockDriverAIOCBSync {
+    BlockDriverAIOCB common;
+    QEMUBH *bh;
+    int ret;
+    /* vector translation state */
+    QEMUIOVector *qiov;
+    uint8_t *bounce;
+    int is_write;
+} BlockDriverAIOCBSync;
+
+static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
+{
+    BlockDriverAIOCBSync *acb = (BlockDriverAIOCBSync *)blockacb;
+    qemu_bh_cancel(acb->bh);
+    qemu_aio_release(acb);
+}
+
+static AIOPool bdrv_em_aio_pool = {
+    .aiocb_size         = sizeof(BlockDriverAIOCBSync),
+    .cancel             = bdrv_aio_cancel_em,
+};
+
 static void bdrv_aio_bh_cb(void *opaque)
 {
     BlockDriverAIOCBSync *acb = opaque;
......
 {
     BlockDriverAIOCBSync *acb;
 
-    acb = qemu_aio_get(bs, cb, opaque);
+    acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
     acb->is_write = is_write;
     acb->qiov = qiov;
     acb->bounce = qemu_blockalign(bs, qiov->size);
......
     return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
 }
 
-
-static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
-{
-    BlockDriverAIOCBSync *acb = (BlockDriverAIOCBSync *)blockacb;
-    qemu_bh_cancel(acb->bh);
-    qemu_aio_release(acb);
-}
-
 /**************************************************************/
 /* sync block device emulation */
 
......
     module_call_init(MODULE_INIT_BLOCK);
 }
 
-void aio_pool_init(AIOPool *pool, int aiocb_size,
-                   void (*cancel)(BlockDriverAIOCB *acb))
-{
-    pool->aiocb_size = aiocb_size;
-    pool->cancel = cancel;
-    pool->free_aiocb = NULL;
-}
-
-void *qemu_aio_get_pool(AIOPool *pool, BlockDriverState *bs,
-                        BlockDriverCompletionFunc *cb, void *opaque)
+void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
+                   BlockDriverCompletionFunc *cb, void *opaque)
 {
     BlockDriverAIOCB *acb;
 
......
     return acb;
 }
 
-void *qemu_aio_get(BlockDriverState *bs, BlockDriverCompletionFunc *cb,
-                   void *opaque)
-{
-    return qemu_aio_get_pool(&bs->drv->aio_pool, bs, cb, opaque);
-}
-
 void qemu_aio_release(void *p)
 {
     BlockDriverAIOCB *acb = (BlockDriverAIOCB *)p;
b/block/curl.c
     return -EINVAL;
 }
 
+static void curl_aio_cancel(BlockDriverAIOCB *blockacb)
+{
+    // Do we have to implement canceling? Seems to work without...
+}
+
+static AIOPool curl_aio_pool = {
+    .aiocb_size         = sizeof(CURLAIOCB),
+    .cancel             = curl_aio_cancel,
+};
+
 static BlockDriverAIOCB *curl_aio_readv(BlockDriverState *bs,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
         BlockDriverCompletionFunc *cb, void *opaque)
......
     size_t end;
     CURLState *state;
 
-    acb = qemu_aio_get(bs, cb, opaque);
+    acb = qemu_aio_get(&curl_aio_pool, bs, cb, opaque);
     if (!acb)
         return NULL;
 
......
     return &acb->common;
 }
 
-static void curl_aio_cancel(BlockDriverAIOCB *blockacb)
-{
-    // Do we have to implement canceling? Seems to work without...
-}
-
 static void curl_close(BlockDriverState *bs)
 {
     BDRVCURLState *s = bs->opaque;
......
     .bdrv_close      = curl_close,
     .bdrv_getlength  = curl_getlength,
 
-    .aiocb_size      = sizeof(CURLAIOCB),
     .bdrv_aio_readv  = curl_aio_readv,
-    .bdrv_aio_cancel = curl_aio_cancel,
 };
 
 static BlockDriver bdrv_https = {
......
     .bdrv_close      = curl_close,
     .bdrv_getlength  = curl_getlength,
 
-    .aiocb_size      = sizeof(CURLAIOCB),
     .bdrv_aio_readv  = curl_aio_readv,
-    .bdrv_aio_cancel = curl_aio_cancel,
 };
 
 static BlockDriver bdrv_ftp = {
......
     .bdrv_close      = curl_close,
     .bdrv_getlength  = curl_getlength,
 
-    .aiocb_size      = sizeof(CURLAIOCB),
     .bdrv_aio_readv  = curl_aio_readv,
-    .bdrv_aio_cancel = curl_aio_cancel,
 };
 
 static BlockDriver bdrv_ftps = {
......
     .bdrv_close      = curl_close,
     .bdrv_getlength  = curl_getlength,
 
-    .aiocb_size      = sizeof(CURLAIOCB),
     .bdrv_aio_readv  = curl_aio_readv,
-    .bdrv_aio_cancel = curl_aio_cancel,
 };
 
 static BlockDriver bdrv_tftp = {
......
     .bdrv_close      = curl_close,
     .bdrv_getlength  = curl_getlength,
 
-    .aiocb_size      = sizeof(CURLAIOCB),
     .bdrv_aio_readv  = curl_aio_readv,
-    .bdrv_aio_cancel = curl_aio_cancel,
 };
 
 static void curl_block_init(void)
b/block/qcow.c
     BlockDriverAIOCB *hd_aiocb;
 } QCowAIOCB;
 
+static void qcow_aio_cancel(BlockDriverAIOCB *blockacb)
+{
+    QCowAIOCB *acb = (QCowAIOCB *)blockacb;
+    if (acb->hd_aiocb)
+        bdrv_aio_cancel(acb->hd_aiocb);
+    qemu_aio_release(acb);
+}
+
+static AIOPool qcow_aio_pool = {
+    .aiocb_size         = sizeof(QCowAIOCB),
+    .cancel             = qcow_aio_cancel,
+};
 
 static QCowAIOCB *qcow_aio_setup(BlockDriverState *bs,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
......
 {
     QCowAIOCB *acb;
 
-    acb = qemu_aio_get(bs, cb, opaque);
+    acb = qemu_aio_get(&qcow_aio_pool, bs, cb, opaque);
     if (!acb)
         return NULL;
     acb->hd_aiocb = NULL;
......
     return &acb->common;
 }
 
-static void qcow_aio_cancel(BlockDriverAIOCB *blockacb)
-{
-    QCowAIOCB *acb = (QCowAIOCB *)blockacb;
-    if (acb->hd_aiocb)
-        bdrv_aio_cancel(acb->hd_aiocb);
-    qemu_aio_release(acb);
-}
-
 static void qcow_close(BlockDriverState *bs)
 {
     BDRVQcowState *s = bs->opaque;
......
     .bdrv_make_empty	= qcow_make_empty,
     .bdrv_aio_readv	= qcow_aio_readv,
     .bdrv_aio_writev	= qcow_aio_writev,
-    .bdrv_aio_cancel	= qcow_aio_cancel,
-    .aiocb_size		= sizeof(QCowAIOCB),
     .bdrv_write_compressed = qcow_write_compressed,
     .bdrv_get_info	= qcow_get_info,
 
b/block/qcow2.c
     QCowL2Meta l2meta;
 } QCowAIOCB;
 
+static void qcow_aio_cancel(BlockDriverAIOCB *blockacb)
+{
+    QCowAIOCB *acb = (QCowAIOCB *)blockacb;
+    if (acb->hd_aiocb)
+        bdrv_aio_cancel(acb->hd_aiocb);
+    qemu_aio_release(acb);
+}
+
+static AIOPool qcow_aio_pool = {
+    .aiocb_size         = sizeof(QCowAIOCB),
+    .cancel             = qcow_aio_cancel,
+};
+
 static void qcow_aio_read_cb(void *opaque, int ret);
 static void qcow_aio_read_bh(void *opaque)
 {
......
 {
     QCowAIOCB *acb;
 
-    acb = qemu_aio_get(bs, cb, opaque);
+    acb = qemu_aio_get(&qcow_aio_pool, bs, cb, opaque);
     if (!acb)
         return NULL;
     acb->hd_aiocb = NULL;
......
     return &acb->common;
 }
 
-static void qcow_aio_cancel(BlockDriverAIOCB *blockacb)
-{
-    QCowAIOCB *acb = (QCowAIOCB *)blockacb;
-    if (acb->hd_aiocb)
-        bdrv_aio_cancel(acb->hd_aiocb);
-    qemu_aio_release(acb);
-}
-
 static void qcow_close(BlockDriverState *bs)
 {
     BDRVQcowState *s = bs->opaque;
......
 
     .bdrv_aio_readv	= qcow_aio_readv,
     .bdrv_aio_writev	= qcow_aio_writev,
-    .bdrv_aio_cancel	= qcow_aio_cancel,
-    .aiocb_size		= sizeof(QCowAIOCB),
     .bdrv_write_compressed = qcow_write_compressed,
 
     .bdrv_snapshot_create = qcow_snapshot_create,
b/block/raw-posix.c
     return 0;
 }
 
+static void raw_aio_remove(RawAIOCB *acb)
+{
+    RawAIOCB **pacb;
+
+    /* remove the callback from the queue */
+    pacb = &posix_aio_state->first_aio;
+    for(;;) {
+        if (*pacb == NULL) {
+            fprintf(stderr, "raw_aio_remove: aio request not found!\n");
+            break;
+        } else if (*pacb == acb) {
+            *pacb = acb->next;
+            qemu_aio_release(acb);
+            break;
+        }
+        pacb = &(*pacb)->next;
+    }
+}
+
+static void raw_aio_cancel(BlockDriverAIOCB *blockacb)
+{
+    int ret;
+    RawAIOCB *acb = (RawAIOCB *)blockacb;
+
+    ret = qemu_paio_cancel(acb->aiocb.aio_fildes, &acb->aiocb);
+    if (ret == QEMU_PAIO_NOTCANCELED) {
+        /* fail safe: if the aio could not be canceled, we wait for
+           it */
+        while (qemu_paio_error(&acb->aiocb) == EINPROGRESS);
+    }
+
+    raw_aio_remove(acb);
+}
+
+static AIOPool raw_aio_pool = {
+    .aiocb_size         = sizeof(RawAIOCB),
+    .cancel             = raw_aio_cancel,
+};
+
 static RawAIOCB *raw_aio_setup(BlockDriverState *bs, int64_t sector_num,
         QEMUIOVector *qiov, int nb_sectors,
         BlockDriverCompletionFunc *cb, void *opaque)
......
     if (fd_open(bs) < 0)
         return NULL;
 
-    acb = qemu_aio_get(bs, cb, opaque);
+    acb = qemu_aio_get(&raw_aio_pool, bs, cb, opaque);
     if (!acb)
         return NULL;
     acb->aiocb.aio_fildes = s->fd;
......
     return acb;
 }
 
-static void raw_aio_remove(RawAIOCB *acb)
-{
-    RawAIOCB **pacb;
-
-    /* remove the callback from the queue */
-    pacb = &posix_aio_state->first_aio;
-    for(;;) {
-        if (*pacb == NULL) {
-            fprintf(stderr, "raw_aio_remove: aio request not found!\n");
-            break;
-        } else if (*pacb == acb) {
-            *pacb = acb->next;
-            qemu_aio_release(acb);
-            break;
-        }
-        pacb = &(*pacb)->next;
-    }
-}
-
 static BlockDriverAIOCB *raw_aio_readv(BlockDriverState *bs,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
         BlockDriverCompletionFunc *cb, void *opaque)
......
     }
     return &acb->common;
 }
-
-static void raw_aio_cancel(BlockDriverAIOCB *blockacb)
-{
-    int ret;
-    RawAIOCB *acb = (RawAIOCB *)blockacb;
-
-    ret = qemu_paio_cancel(acb->aiocb.aio_fildes, &acb->aiocb);
-    if (ret == QEMU_PAIO_NOTCANCELED) {
-        /* fail safe: if the aio could not be canceled, we wait for
-           it */
-        while (qemu_paio_error(&acb->aiocb) == EINPROGRESS);
-    }
-
-    raw_aio_remove(acb);
-}
 #else /* CONFIG_AIO */
 static int posix_aio_init(void)
 {
......
 #ifdef CONFIG_AIO
     .bdrv_aio_readv = raw_aio_readv,
     .bdrv_aio_writev = raw_aio_writev,
-    .bdrv_aio_cancel = raw_aio_cancel,
-    .aiocb_size = sizeof(RawAIOCB),
 #endif
 
     .bdrv_truncate = raw_truncate,
......
     if (fd_open(bs) < 0)
         return NULL;
 
-    acb = qemu_aio_get(bs, cb, opaque);
+    acb = qemu_aio_get(&raw_aio_pool, bs, cb, opaque);
     if (!acb)
         return NULL;
     acb->aiocb.aio_fildes = s->fd;
......
 #ifdef CONFIG_AIO
     .bdrv_aio_readv	= raw_aio_readv,
     .bdrv_aio_writev	= raw_aio_writev,
-    .bdrv_aio_cancel	= raw_aio_cancel,
-    .aiocb_size		= sizeof(RawAIOCB),
 #endif
 
     .bdrv_read          = raw_read,
b/block_int.h
     BlockDriverAIOCB *(*bdrv_aio_writev)(BlockDriverState *bs,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
         BlockDriverCompletionFunc *cb, void *opaque);
-    void (*bdrv_aio_cancel)(BlockDriverAIOCB *acb);
-    int aiocb_size;
 
     const char *protocol_name;
     int (*bdrv_truncate)(BlockDriverState *bs, int64_t offset);
......
         unsigned long int req, void *buf,
         BlockDriverCompletionFunc *cb, void *opaque);
 
-    AIOPool aio_pool;
-
     /* List of options for creating images, terminated by name == NULL */
     QEMUOptionParameter *create_options;
 
......
 
 void get_tmp_filename(char *filename, int size);
 
-void aio_pool_init(AIOPool *pool, int aiocb_size,
-                   void (*cancel)(BlockDriverAIOCB *acb));
-
-void *qemu_aio_get(BlockDriverState *bs, BlockDriverCompletionFunc *cb,
-                   void *opaque);
-void *qemu_aio_get_pool(AIOPool *pool, BlockDriverState *bs,
-                        BlockDriverCompletionFunc *cb, void *opaque);
+void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
+                   BlockDriverCompletionFunc *cb, void *opaque);
 void qemu_aio_release(void *p);
 
 void *qemu_blockalign(BlockDriverState *bs, size_t size);
b/dma-helpers.c
 #include "dma.h"
 #include "block_int.h"
 
-static AIOPool dma_aio_pool;
-
 void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
 {
     qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
......
     }
 }
 
+static void dma_aio_cancel(BlockDriverAIOCB *acb)
+{
+    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);
+
+    if (dbs->acb) {
+        bdrv_aio_cancel(dbs->acb);
+    }
+}
+
+static AIOPool dma_aio_pool = {
+    .aiocb_size         = sizeof(DMAAIOCB),
+    .cancel             = dma_aio_cancel,
+};
+
 static BlockDriverAIOCB *dma_bdrv_io(
     BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
     BlockDriverCompletionFunc *cb, void *opaque,
     int is_write)
 {
-    DMAAIOCB *dbs =  qemu_aio_get_pool(&dma_aio_pool, bs, cb, opaque);
+    DMAAIOCB *dbs =  qemu_aio_get(&dma_aio_pool, bs, cb, opaque);
 
     dbs->acb = NULL;
     dbs->bs = bs;
......
 {
     return dma_bdrv_io(bs, sg, sector, cb, opaque, 1);
 }
-
-static void dma_aio_cancel(BlockDriverAIOCB *acb)
-{
-    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);
-
-    if (dbs->acb) {
-        bdrv_aio_cancel(dbs->acb);
-    }
-}
-
-void dma_helper_init(void)
-{
-    aio_pool_init(&dma_aio_pool, sizeof(DMAAIOCB), dma_aio_cancel);
-}
b/dma.h
 BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                  QEMUSGList *sg, uint64_t sector,
                                  BlockDriverCompletionFunc *cb, void *opaque);
-void dma_helper_init(void);
-
 #endif
b/vl.c
     cpu_exec_init_all(tb_size * 1024 * 1024);
 
     bdrv_init();
-    dma_helper_init();
 
     /* we always create the cdrom drive, even if no disk is there */
 

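For readers following the API change above: the per-driver pattern this revision converges on (seen in block/curl.c, block/qcow.c, block/qcow2.c, block/raw-posix.c and dma-helpers.c) is sketched below. This is an illustrative sketch only, not code from the tree; MyAIOCB, my_aio_cancel, my_aio_pool and my_aio_readv are hypothetical names, and the sketch assumes the block_int.h declarations as modified by this revision.

/* Hypothetical driver-side sketch of the AIOPool convention introduced
 * above; assumes the block_int.h from this revision. */
#include "block_int.h"

typedef struct MyAIOCB {
    BlockDriverAIOCB common;    /* embedded first, as in the AIOCBs above */
    /* driver-specific request state would follow here */
} MyAIOCB;

static void my_aio_cancel(BlockDriverAIOCB *blockacb)
{
    MyAIOCB *acb = (MyAIOCB *)blockacb;
    /* cancel or wait for the outstanding request, then release the AIOCB */
    qemu_aio_release(acb);
}

/* Each driver now carries its own static pool instead of filling in
 * bdrv_aio_cancel and aiocb_size in its BlockDriver definition. */
static AIOPool my_aio_pool = {
    .aiocb_size         = sizeof(MyAIOCB),
    .cancel             = my_aio_cancel,
};

static BlockDriverAIOCB *my_aio_readv(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    /* qemu_aio_get() now takes the pool as its first argument */
    MyAIOCB *acb = qemu_aio_get(&my_aio_pool, bs, cb, opaque);
    if (!acb)
        return NULL;
    /* ... submit the request; on completion invoke acb->common.cb() ... */
    return &acb->common;
}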