Revision c65bcef3 dma-helpers.c

b/dma-helpers.c
26 26
    return 0;
27 27
}
28 28

  
29
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
29
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma)
30 30
{
31 31
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
32 32
    qsg->nsg = 0;
33 33
    qsg->nalloc = alloc_hint;
34 34
    qsg->size = 0;
35
    qsg->dma = dma;
35 36
}
36 37

  
37 38
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
......
90 91
    int i;
91 92

  
92 93
    for (i = 0; i < dbs->iov.niov; ++i) {
93
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
94
                                  dbs->iov.iov[i].iov_len,
95
                                  dbs->dir != DMA_DIRECTION_TO_DEVICE,
96
                                  dbs->iov.iov[i].iov_len);
94
        dma_memory_unmap(dbs->sg->dma, dbs->iov.iov[i].iov_base,
95
                         dbs->iov.iov[i].iov_len, dbs->dir,
96
                         dbs->iov.iov[i].iov_len);
97 97
    }
98 98
    qemu_iovec_reset(&dbs->iov);
99 99
}
......
122 122
static void dma_bdrv_cb(void *opaque, int ret)
123 123
{
124 124
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
125
    target_phys_addr_t cur_addr, cur_len;
125
    dma_addr_t cur_addr, cur_len;
126 126
    void *mem;
127 127

  
128 128
    trace_dma_bdrv_cb(dbs, ret);
......
139 139
    while (dbs->sg_cur_index < dbs->sg->nsg) {
140 140
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
141 141
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
142
        mem = cpu_physical_memory_map(cur_addr, &cur_len,
143
                                      dbs->dir != DMA_DIRECTION_TO_DEVICE);
142
        mem = dma_memory_map(dbs->sg->dma, cur_addr, &cur_len, dbs->dir);
144 143
        if (!mem)
145 144
            break;
146 145
        qemu_iovec_add(&dbs->iov, mem, cur_len);
......
225 224
}
226 225

  
227 226

  
228
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg, bool to_dev)
227
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
228
                           DMADirection dir)
229 229
{
230 230
    uint64_t resid;
231 231
    int sg_cur_index;
......
236 236
    while (len > 0) {
237 237
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
238 238
        int32_t xfer = MIN(len, entry.len);
239
        cpu_physical_memory_rw(entry.base, ptr, xfer, !to_dev);
239
        dma_memory_rw(sg->dma, entry.base, ptr, xfer, dir);
240 240
        ptr += xfer;
241 241
        len -= xfer;
242 242
        resid -= xfer;
......
247 247

  
248 248
/*
 * Copy @len bytes from @ptr into the guest memory described by @sg.
 *
 * Direction is DMA_DIRECTION_FROM_DEVICE (data flows device -> memory),
 * matching the old cpu_physical_memory_rw(..., is_write = 1) behavior.
 *
 * Returns the residual byte count reported by dma_buf_rw() (bytes of @sg
 * not consumed by the transfer).
 */
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}
252 252

  
253 253
/*
 * Copy from the guest memory described by @sg into the @len-byte buffer
 * at @ptr.
 *
 * Direction is DMA_DIRECTION_TO_DEVICE (data flows memory -> device),
 * matching the old cpu_physical_memory_rw(..., is_write = 0) behavior.
 *
 * Returns the residual byte count reported by dma_buf_rw() (bytes of @sg
 * not consumed by the transfer).
 */
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}
257 257

  
258 258
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,

Also available in: Unified diff