dma-helpers.c @ db895a1e

/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "sysemu/dma.h"
#include "trace.h"
#include "qemu/range.h"
#include "qemu/thread.h"

/* #define DEBUG_IOMMU */

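/* Fill a guest-physical range with a constant byte, going through a small
 * on-stack bounce buffer so arbitrarily large lengths stay bounded. */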
static void do_dma_memory_set(AddressSpace *as,
                              dma_addr_t addr, uint8_t c, dma_addr_t len)
{
#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        address_space_rw(as, addr, fillbuf, l, true);
        len -= l;
        addr += l;
    }
}

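/* Public memset() over a DMA region: issue the ordering barrier, then take
 * the IOMMU path if the context has one, or write directly otherwise. */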
int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(dma, DMA_DIRECTION_FROM_DEVICE);

    if (dma_has_iommu(dma)) {
        return iommu_dma_memory_set(dma, addr, c, len);
    }
    do_dma_memory_set(dma->as, addr, c, len);

    return 0;
}

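/* QEMUSGList is a growable array of (base, len) scatter-gather entries;
 * it starts with room for alloc_hint entries and grows on demand. */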
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->dma = dma;
}

void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}

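/* Per-request state for scatter-gather block I/O.  The generic AIOCB is
 * embedded as the first member so the allocation described by
 * dma_aiocb_info can be used as either type. */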
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    bool in_cancel;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

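/* If dma_memory_map() fails, the request registers itself as a map client
 * (see dma_bdrv_cb below); once mappings become available again, these two
 * helpers restart the state machine from a bottom half. */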
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

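/* Unmap every buffer currently collected in dbs->iov and reset the vector. */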
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->dma, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

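/* Finish the request: unmap buffers, invoke the caller's completion
 * callback, drop any pending bottom half, and release the AIOCB unless
 * dma_aio_cancel() still holds a reference to it. */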
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel.  */
        qemu_aio_release(dbs);
    }
}

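/* Core of the state machine.  Called after each completed block request,
 * it maps as many remaining scatter-gather entries as possible into
 * dbs->iov and submits the next transfer via dbs->io_func, completing the
 * whole request once the list is exhausted or an error is reported. */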
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->dma, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}

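/* .cancel handler: cancel any in-flight block request, then complete with
 * ret == 0 and the user callback suppressed. */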
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}

static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};

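/* Start a scatter-gather transfer of @sg at @sector_num, using @io_func
 * (e.g. bdrv_aio_readv or bdrv_aio_writev) for the underlying block I/O.
 * @cb runs once with the final status after the whole list has been
 * processed. */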
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}

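/*
 * Convenience wrappers used by device models.  A minimal usage sketch
 * (hypothetical device code, not part of this file):
 *
 *     QEMUSGList qsg;
 *
 *     qemu_sglist_init(&qsg, 2, dma_context);
 *     qemu_sglist_add(&qsg, desc_base0, desc_len0);
 *     qemu_sglist_add(&qsg, desc_base1, desc_len1);
 *     dma_bdrv_read(bs, &qsg, sector, my_completion_cb, my_opaque);
 *
 * qemu_sglist_destroy() would then be called once my_completion_cb has run.
 */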
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}

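/* Synchronous copy between a linear buffer and a scatter-gather list;
 * returns the number of bytes of @sg that were not transferred. */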
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->dma, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}

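/* Start block-layer accounting for a transfer covering sg->size bytes. */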
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}

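/* Check that an entire guest DMA range translates through the IOMMU and is
 * accessible in the target address space for the given direction. */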
bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
                            DMADirection dir)
{
    hwaddr paddr, plen;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_check context=%p addr=0x" DMA_ADDR_FMT
            " len=0x" DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
#endif

    while (len) {
        if (dma->translate(dma, addr, &paddr, &plen, dir) != 0) {
            return false;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        if (!address_space_access_valid(dma->as, paddr, plen,
                                        dir == DMA_DIRECTION_FROM_DEVICE)) {
            return false;
        }

        len -= plen;
        addr += plen;
    }

    return true;
}

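/* Copy between a device-side buffer and IOMMU-translated guest memory, one
 * translation window at a time.  On a translation failure the destination
 * buffer is zeroed before returning, so a device that ignores the error
 * does not expose stale QEMU memory to the guest. */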
int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                        void *buf, dma_addr_t len, DMADirection dir)
{
    hwaddr paddr, plen;
    int err;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_rw context=%p addr=0x" DMA_ADDR_FMT " len=0x"
            DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
#endif

    while (len) {
        err = dma->translate(dma, addr, &paddr, &plen, dir);
        if (err) {
            /*
             * In case of failure on reads from the guest, we clean the
             * destination buffer so that a device that doesn't test
             * for errors will not expose qemu internal memory.
             */
            memset(buf, 0, len);
            return -1;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        address_space_rw(dma->as, paddr, buf, plen, dir == DMA_DIRECTION_FROM_DEVICE);

        len -= plen;
        addr += plen;
        buf += plen;
    }

    return 0;
}

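/* memset() through the IOMMU: translate each window and fill it via
 * do_dma_memory_set(). */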
int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
                         dma_addr_t len)
{
    hwaddr paddr, plen;
    int err;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_set context=%p addr=0x" DMA_ADDR_FMT
            " len=0x" DMA_ADDR_FMT "\n", dma, addr, len);
#endif

    while (len) {
        err = dma->translate(dma, addr, &paddr, &plen,
                             DMA_DIRECTION_FROM_DEVICE);
        if (err) {
            return err;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        do_dma_memory_set(dma->as, paddr, c, plen);

        len -= plen;
        addr += plen;
    }

    return 0;
}

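/* Bind a DMAContext to its target address space and to the optional IOMMU
 * translate/map/unmap callbacks. */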
void dma_context_init(DMAContext *dma, AddressSpace *as, DMATranslateFunc translate,
                      DMAMapFunc map, DMAUnmapFunc unmap)
{
#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_context_init(%p, %p, %p, %p)\n",
            dma, translate, map, unmap);
#endif
    dma->as = as;
    dma->translate = translate;
    dma->map = map;
    dma->unmap = unmap;
}

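/* Map a guest DMA range for direct access.  A custom map hook takes
 * precedence; otherwise the range is translated once and the (possibly
 * clamped) physical window is mapped with address_space_map(). */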
void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
                           DMADirection dir)
{
    int err;
    hwaddr paddr, plen;
    void *buf;

    if (dma->map) {
        return dma->map(dma, addr, len, dir);
    }

    plen = *len;
    err = dma->translate(dma, addr, &paddr, &plen, dir);
    if (err) {
        return NULL;
    }

    /*
     * If this is true, the virtual region is contiguous,
     * but the translated physical region isn't. We just
     * clamp *len, much like address_space_map() does.
     */
    if (plen < *len) {
        *len = plen;
    }

    buf = address_space_map(dma->as, paddr, &plen, dir == DMA_DIRECTION_FROM_DEVICE);
    *len = plen;

    return buf;
}

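/* Counterpart of iommu_dma_memory_map(): release a mapping, using the
 * custom unmap hook if one was provided. */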
void iommu_dma_memory_unmap(DMAContext *dma, void *buffer, dma_addr_t len,
                            DMADirection dir, dma_addr_t access_len)
{
    if (dma->unmap) {
        dma->unmap(dma, buffer, len, dir, access_len);
        return;
    }

    address_space_unmap(dma->as, buffer, len, dir == DMA_DIRECTION_FROM_DEVICE,
                        access_len);
}