dma-helpers.c @ 15e58a21
/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "trace.h"
#include "range.h"
#include "qemu-thread.h"

/* #define DEBUG_IOMMU */

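/*
 * Fill a guest-physical range with a constant byte.  Works through a
 * small on-stack bounce buffer so that arbitrarily large ranges can be
 * set in FILLBUF_SIZE chunks.
 */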
static void do_dma_memory_set(dma_addr_t addr, uint8_t c, dma_addr_t len)
{
#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        cpu_physical_memory_rw(addr, fillbuf, l, true);
        len -= l;
        addr += l;
    }
}

int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(dma, DMA_DIRECTION_FROM_DEVICE);

    if (dma_has_iommu(dma)) {
        return iommu_dma_memory_set(dma, addr, c, len);
    }
    do_dma_memory_set(addr, c, len);

    return 0;
}

void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->dma = dma;
}

void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    g_free(qsg->sg);
}

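/*
 * State for one in-flight scatter/gather block request: the current
 * position in the sglist (sg_cur_index/sg_cur_byte), the vector of
 * host mappings built so far (iov), and the bottom half used to retry
 * after a dma_memory_map() failure.
 */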
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    bool in_cancel;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->dma, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel.  */
        qemu_aio_release(dbs);
    }
}

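/*
 * Completion callback and main driver of the request: unmap the
 * segments of the previous iteration, map as many of the remaining
 * sglist bytes as possible, and submit the resulting vector with
 * io_func.  If nothing could be mapped, wait for mappings to be
 * freed and retry from a bottom half.
 */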
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->dma, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}

static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}

static AIOPool dma_aio_pool = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};

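/*
 * Start a scatter/gather block AIO request.  io_func is the vectored
 * bdrv submission function (bdrv_aio_readv or bdrv_aio_writev, as in
 * the wrappers below); the initial dma_bdrv_cb(dbs, 0) call kicks off
 * mapping and I/O.
 */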
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}

BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}

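/*
 * Synchronously copy between a linear buffer and an sglist, at most
 * len bytes, and return the number of sglist bytes left untransferred.
 */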
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->dma, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}

void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}

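/*
 * Check that an entire guest DMA range translates successfully for
 * the given direction, without actually touching memory.
 */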
bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
                            DMADirection dir)
{
    target_phys_addr_t paddr, plen;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_check context=%p addr=0x" DMA_ADDR_FMT
            " len=0x" DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
#endif

    while (len) {
        if (dma->translate(dma, addr, &paddr, &plen, dir) != 0) {
            return false;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        len -= plen;
        addr += plen;
    }

    return true;
}

int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                        void *buf, dma_addr_t len, DMADirection dir)
{
    target_phys_addr_t paddr, plen;
    int err;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_rw context=%p addr=0x" DMA_ADDR_FMT " len=0x"
            DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
#endif

    while (len) {
        err = dma->translate(dma, addr, &paddr, &plen, dir);
        if (err) {
            /*
             * In case of failure on reads from the guest, we clean the
             * destination buffer so that a device that doesn't test
             * for errors will not expose qemu internal memory.
             */
            memset(buf, 0, len);
            return -1;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        cpu_physical_memory_rw(paddr, buf, plen,
                               dir == DMA_DIRECTION_FROM_DEVICE);

        len -= plen;
        addr += plen;
        buf += plen;
    }

    return 0;
}

int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
                         dma_addr_t len)
{
    target_phys_addr_t paddr, plen;
    int err;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_set context=%p addr=0x" DMA_ADDR_FMT
            " len=0x" DMA_ADDR_FMT "\n", dma, addr, len);
#endif

    while (len) {
        err = dma->translate(dma, addr, &paddr, &plen,
                             DMA_DIRECTION_FROM_DEVICE);
        if (err) {
            return err;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        do_dma_memory_set(paddr, c, plen);

        len -= plen;
        addr += plen;
    }

    return 0;
}

void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
                      DMAMapFunc map, DMAUnmapFunc unmap)
{
#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_context_init(%p, %p, %p, %p)\n",
            dma, translate, map, unmap);
#endif
    dma->translate = translate;
    dma->map = map;
    dma->unmap = unmap;
}

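/*
 * Map a guest DMA range into host memory.  A context-specific map
 * callback takes precedence; otherwise translate the start of the
 * range, clamp *len to the contiguously translated part, and map it
 * with cpu_physical_memory_map().
 */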
void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
                           DMADirection dir)
{
    int err;
    target_phys_addr_t paddr, plen;
    void *buf;

    if (dma->map) {
        return dma->map(dma, addr, len, dir);
    }

    plen = *len;
    err = dma->translate(dma, addr, &paddr, &plen, dir);
    if (err) {
        return NULL;
    }

    /*
     * If this is true, the virtual region is contiguous,
     * but the translated physical region isn't. We just
     * clamp *len, much like cpu_physical_memory_map() does.
     */
    if (plen < *len) {
        *len = plen;
    }

    buf = cpu_physical_memory_map(paddr, &plen,
                                  dir == DMA_DIRECTION_FROM_DEVICE);
    *len = plen;

    return buf;
}

void iommu_dma_memory_unmap(DMAContext *dma, void *buffer, dma_addr_t len,
                            DMADirection dir, dma_addr_t access_len)
{
    if (dma->unmap) {
        dma->unmap(dma, buffer, len, dir, access_len);
        return;
    }

    cpu_physical_memory_unmap(buffer, len,
                              dir == DMA_DIRECTION_FROM_DEVICE,
                              access_len);
}