dma-helpers.c @ bc9b78de
/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "trace.h"
#include "range.h"
#include "qemu-thread.h"

/* #define DEBUG_IOMMU */

static void do_dma_memory_set(dma_addr_t addr, uint8_t c, dma_addr_t len)
{
#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        cpu_physical_memory_rw(addr, fillbuf, l, true);
        len -= l;
        addr += l;
    }
}

int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(dma, DMA_DIRECTION_FROM_DEVICE);

    if (dma_has_iommu(dma)) {
        return iommu_dma_memory_set(dma, addr, c, len);
    }
    do_dma_memory_set(addr, c, len);

    return 0;
}

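/*
 * Note that in the non-IOMMU case above, dma_memory_set() falls through to
 * do_dma_memory_set(), which fills guest memory through a FILLBUF_SIZE
 * bounce buffer instead of mapping the region, so an arbitrarily large
 * fill only ever costs 512 bytes of stack.
 */
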
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->dma = dma;
}

void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

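/*
 * qemu_sglist_add() grows the entry array geometrically (nalloc becomes
 * 2 * nalloc + 1), so appending N entries costs amortized constant time
 * per entry.
 */
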
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}

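/*
 * Typical use (a hypothetical device model, not code from this file):
 * collect the guest-physical fragments of a request into a QEMUSGList and
 * hand it to dma_bdrv_read()/dma_bdrv_write() further down:
 *
 *     QEMUSGList qsg;
 *
 *     qemu_sglist_init(&qsg, nfragments, dma_context);
 *     qemu_sglist_add(&qsg, frag_base, frag_len);   // once per fragment
 *     dma_bdrv_read(bs, &qsg, sector, completion_cb, opaque);
 *     // qemu_sglist_destroy(&qsg) once the request completes
 *
 * The DMAAIOCB below is the control block behind those calls: it walks the
 * scatter-gather list, maps as much of it as it can, and issues vectored
 * block I/O piece by piece.
 */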
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    bool in_cancel;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

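/*
 * If dma_memory_map() cannot map a fragment (typically because the pool of
 * bounce buffers is exhausted), dma_bdrv_cb() below registers
 * continue_after_map_failure() as a map client.  When buffers are freed,
 * the transfer is restarted from a bottom half; presumably this keeps the
 * retry out of the context that released the mapping.
 */
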
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->dma, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel.  */
        qemu_aio_release(dbs);
    }
}

static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->dma, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}

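/*
 * dma_bdrv_cb() above is both the completion callback and the engine of
 * the transfer: each invocation credits the bytes just transferred to
 * sector_num (iov.size / 512, so callers work in 512-byte sectors), maps
 * as many remaining scatter-gather fragments as it can, and issues one
 * vectored request for them.  A partial mapping is fine; the unfinished
 * fragments are picked up on the next invocation.
 */
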
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}

static AIOPool dma_aio_pool = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};

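/*
 * dma_bdrv_io() is the generic entry point: io_func is the vectored block
 * routine to drive (bdrv_aio_readv or bdrv_aio_writev in the wrappers
 * below), and dir tells the mapping code whether guest memory is written
 * (DMA_DIRECTION_FROM_DEVICE) or read (DMA_DIRECTION_TO_DEVICE).
 */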
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}

BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}

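/*
 * Usage sketch (hypothetical emulated-device code, not from this file):
 *
 *     static void my_dma_done(void *opaque, int ret)
 *     {
 *         MyDevice *s = opaque;
 *         // raise the completion interrupt, destroy s->qsg, ...
 *     }
 *
 *     s->acb = dma_bdrv_read(s->bs, &s->qsg, s->sector, my_dma_done, s);
 *
 * The returned BlockDriverAIOCB can be handed to bdrv_aio_cancel(), which
 * reaches dma_aio_cancel() above through dma_aio_pool.
 */
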
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->dma, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

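/*
 * dma_buf_rw() returns the residual byte count, i.e. how much of the
 * scatter-gather list was *not* transferred; a nonzero result therefore
 * indicates a transfer shorter than sg->size.
 */
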
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}

|
274 |
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
|
275 |
QEMUSGList *sg, enum BlockAcctType type)
|
276 |
{ |
277 |
bdrv_acct_start(bs, cookie, sg->size, type); |
278 |
} |
279 |
|
280 |
bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
|
281 |
DMADirection dir) |
282 |
{ |
283 |
target_phys_addr_t paddr, plen; |
284 |
|
285 |
#ifdef DEBUG_IOMMU
|
286 |
fprintf(stderr, "dma_memory_check context=%p addr=0x" DMA_ADDR_FMT
|
287 |
" len=0x" DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir); |
288 |
#endif
|
289 |
|
290 |
while (len) {
|
291 |
if (dma->translate(dma, addr, &paddr, &plen, dir) != 0) { |
292 |
return false; |
293 |
} |
294 |
|
295 |
/* The translation might be valid for larger regions. */
|
296 |
if (plen > len) {
|
297 |
plen = len; |
298 |
} |
299 |
|
300 |
len -= plen; |
301 |
addr += plen; |
302 |
} |
303 |
|
304 |
return true; |
305 |
} |
306 |
|
307 |
int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
|
308 |
void *buf, dma_addr_t len, DMADirection dir)
|
309 |
{ |
310 |
target_phys_addr_t paddr, plen; |
311 |
int err;
|
312 |
|
313 |
#ifdef DEBUG_IOMMU
|
314 |
fprintf(stderr, "dma_memory_rw context=%p addr=0x" DMA_ADDR_FMT " len=0x" |
315 |
DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
|
316 |
#endif
|
317 |
|
318 |
while (len) {
|
319 |
err = dma->translate(dma, addr, &paddr, &plen, dir); |
320 |
if (err) {
|
321 |
/*
|
322 |
* In case of failure on reads from the guest, we clean the
|
323 |
* destination buffer so that a device that doesn't test
|
324 |
* for errors will not expose qemu internal memory.
|
325 |
*/
|
326 |
memset(buf, 0, len);
|
327 |
return -1; |
328 |
} |
329 |
|
330 |
/* The translation might be valid for larger regions. */
|
331 |
if (plen > len) {
|
332 |
plen = len; |
333 |
} |
334 |
|
335 |
cpu_physical_memory_rw(paddr, buf, plen, |
336 |
dir == DMA_DIRECTION_FROM_DEVICE); |
337 |
|
338 |
len -= plen; |
339 |
addr += plen; |
340 |
buf += plen; |
341 |
} |
342 |
|
343 |
return 0; |
344 |
} |
345 |
|
346 |
int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
|
347 |
dma_addr_t len) |
348 |
{ |
349 |
target_phys_addr_t paddr, plen; |
350 |
int err;
|
351 |
|
352 |
#ifdef DEBUG_IOMMU
|
353 |
fprintf(stderr, "dma_memory_set context=%p addr=0x" DMA_ADDR_FMT
|
354 |
" len=0x" DMA_ADDR_FMT "\n", dma, addr, len); |
355 |
#endif
|
356 |
|
357 |
while (len) {
|
358 |
err = dma->translate(dma, addr, &paddr, &plen, |
359 |
DMA_DIRECTION_FROM_DEVICE); |
360 |
if (err) {
|
361 |
return err;
|
362 |
} |
363 |
|
364 |
/* The translation might be valid for larger regions. */
|
365 |
if (plen > len) {
|
366 |
plen = len; |
367 |
} |
368 |
|
369 |
do_dma_memory_set(paddr, c, plen); |
370 |
|
371 |
len -= plen; |
372 |
addr += plen; |
373 |
} |
374 |
|
375 |
return 0; |
376 |
} |
377 |
|
378 |
void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
|
379 |
DMAMapFunc map, DMAUnmapFunc unmap) |
380 |
{ |
381 |
#ifdef DEBUG_IOMMU
|
382 |
fprintf(stderr, "dma_context_init(%p, %p, %p, %p)\n",
|
383 |
dma, translate, map, unmap); |
384 |
#endif
|
385 |
dma->translate = translate; |
386 |
dma->map = map; |
387 |
dma->unmap = unmap; |
388 |
} |
389 |
|
390 |
void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
|
391 |
DMADirection dir) |
392 |
{ |
393 |
int err;
|
394 |
target_phys_addr_t paddr, plen; |
395 |
void *buf;
|
396 |
|
397 |
if (dma->map) {
|
398 |
return dma->map(dma, addr, len, dir);
|
399 |
} |
400 |
|
401 |
plen = *len; |
402 |
err = dma->translate(dma, addr, &paddr, &plen, dir); |
403 |
if (err) {
|
404 |
return NULL; |
405 |
} |
406 |
|
407 |
/*
|
408 |
* If this is true, the virtual region is contiguous,
|
409 |
* but the translated physical region isn't. We just
|
410 |
* clamp *len, much like cpu_physical_memory_map() does.
|
411 |
*/
|
412 |
if (plen < *len) {
|
413 |
*len = plen; |
414 |
} |
415 |
|
416 |
buf = cpu_physical_memory_map(paddr, &plen, |
417 |
dir == DMA_DIRECTION_FROM_DEVICE); |
418 |
*len = plen; |
419 |
|
420 |
return buf;
|
421 |
} |
422 |
|
423 |
void iommu_dma_memory_unmap(DMAContext *dma, void *buffer, dma_addr_t len, |
424 |
DMADirection dir, dma_addr_t access_len) |
425 |
{ |
426 |
if (dma->unmap) {
|
427 |
dma->unmap(dma, buffer, len, dir, access_len); |
428 |
return;
|
429 |
} |
430 |
|
431 |
cpu_physical_memory_unmap(buffer, len, |
432 |
dir == DMA_DIRECTION_FROM_DEVICE, |
433 |
access_len); |
434 |
|
435 |
} |