Revision c3adb5b9 dma-helpers.c

--- a/dma-helpers.c
+++ b/dma-helpers.c
     QEMUSGList *sg;
     uint64_t sector_num;
     bool to_dev;
+    bool in_cancel;
     int sg_cur_index;
     target_phys_addr_t sg_cur_byte;
     QEMUIOVector iov;
......
 
     qemu_bh_delete(dbs->bh);
     dbs->bh = NULL;
-    dma_bdrv_cb(opaque, 0);
+    dma_bdrv_cb(dbs, 0);
 }
 
 static void continue_after_map_failure(void *opaque)
......
                                   dbs->iov.iov[i].iov_len, !dbs->to_dev,
                                   dbs->iov.iov[i].iov_len);
     }
+    qemu_iovec_reset(&dbs->iov);
+}
+
+static void dma_complete(DMAAIOCB *dbs, int ret)
+{
+    dma_bdrv_unmap(dbs);
+    if (dbs->common.cb) {
+        dbs->common.cb(dbs->common.opaque, ret);
+    }
+    qemu_iovec_destroy(&dbs->iov);
+    if (dbs->bh) {
+        qemu_bh_delete(dbs->bh);
+        dbs->bh = NULL;
+    }
+    if (!dbs->in_cancel) {
+        /* Requests may complete while dma_aio_cancel is in progress.  In
+         * this case, the AIOCB should not be released because it is still
+         * referenced by dma_aio_cancel.  */
+        qemu_aio_release(dbs);
+    }
 }
 
 static void dma_bdrv_cb(void *opaque, int ret)
......
     dbs->acb = NULL;
     dbs->sector_num += dbs->iov.size / 512;
     dma_bdrv_unmap(dbs);
-    qemu_iovec_reset(&dbs->iov);
 
     if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
-        dbs->common.cb(dbs->common.opaque, ret);
-        qemu_iovec_destroy(&dbs->iov);
-        qemu_aio_release(dbs);
+        dma_complete(dbs, ret);
         return;
     }
 
......
     dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                             dbs->iov.size / 512, dma_bdrv_cb, dbs);
     if (!dbs->acb) {
-        dma_bdrv_unmap(dbs);
-        qemu_iovec_destroy(&dbs->iov);
-        return;
+        dma_complete(dbs, -EIO);
     }
 }
 
......
     DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);
 
     if (dbs->acb) {
-        bdrv_aio_cancel(dbs->acb);
+        BlockDriverAIOCB *acb = dbs->acb;
+        dbs->acb = NULL;
+        dbs->in_cancel = true;
+        bdrv_aio_cancel(acb);
+        dbs->in_cancel = false;
     }
+    dbs->common.cb = NULL;
+    dma_complete(dbs, 0);
 }
 
 static AIOPool dma_aio_pool = {
......
     dbs->bh = NULL;
     qemu_iovec_init(&dbs->iov, sg->nsg);
     dma_bdrv_cb(dbs, 0);
-    if (!dbs->acb) {
-        qemu_aio_release(dbs);
-        return NULL;
-    }
     return &dbs->common;
 }
 
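Below is a minimal, self-contained sketch of the lifetime pattern this revision introduces: completion and cleanup are funnelled through a single dma_complete()-style helper, and an in_cancel flag keeps a completion that fires re-entrantly from inside bdrv_aio_cancel() from releasing the AIOCB while the cancel path still references it. The names here (AsyncOp, op_complete, op_cancel, backend_cancel, op_release) are hypothetical stand-ins for illustration, not QEMU's real AIOCB/AIOPool API.

/* Sketch of the in_cancel guard, under the assumption that cancelling the
 * backend request may drive the completion path synchronously. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct AsyncOp {
    void (*cb)(int ret);     /* user completion callback (like common.cb) */
    bool in_cancel;          /* set while op_cancel() is on the stack */
    bool pending;            /* backend request still in flight */
} AsyncOp;

static void op_release(AsyncOp *op)
{
    printf("op %p released\n", (void *)op);
    free(op);
}

/* Loose counterpart of dma_complete(): run the callback, then release the
 * op only if no op_cancel() call still holds a reference to it. */
static void op_complete(AsyncOp *op, int ret)
{
    if (op->cb) {
        op->cb(ret);
    }
    if (!op->in_cancel) {
        op_release(op);
    }
}

/* Stand-in for the backend (think bdrv_aio_cancel): cancelling may finish
 * the request synchronously, re-entering op_complete(). */
static void backend_cancel(AsyncOp *op)
{
    if (op->pending) {
        op->pending = false;
        op_complete(op, -ECANCELED);
    }
}

/* Loose counterpart of dma_aio_cancel(): flag the op, let the backend
 * finish, then perform the final completion and release ourselves. */
static void op_cancel(AsyncOp *op)
{
    if (op->pending) {
        op->in_cancel = true;
        backend_cancel(op);  /* may call op_complete() re-entrantly */
        op->in_cancel = false;
    }
    op->cb = NULL;           /* user callback must not fire a second time */
    op_complete(op, 0);      /* final cleanup and release happen here */
}

static void user_cb(int ret)
{
    printf("user callback, ret=%d\n", ret);
}

int main(void)
{
    AsyncOp *op = calloc(1, sizeof(*op));
    op->cb = user_cb;
    op->pending = true;
    op_cancel(op);           /* callback fires once, op is freed once */
    return 0;
}

Running the sketch fires the user callback exactly once and frees the op exactly once, mirroring the invariant the in_cancel flag and the dbs->common.cb = NULL assignment in this patch are protecting.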
