/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "trace.h"
#include "range.h"
#include "qemu-thread.h"

/* #define DEBUG_IOMMU */
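
/*
 * Fill a range of guest physical memory with a constant byte, going
 * through a small on-stack bounce buffer FILLBUF_SIZE bytes at a time,
 * so that arbitrarily large lengths need no heap allocation.
 */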
static void do_dma_memory_set(dma_addr_t addr, uint8_t c, dma_addr_t len)
{
#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        cpu_physical_memory_rw(addr, fillbuf, l, true);
        len -= l;
        addr += l;
    }
}
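
/*
 * Public entry point: issue the DMA barrier, then take the IOMMU path
 * when the DMAContext has one, or fall back to direct physical access.
 */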
int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(dma, DMA_DIRECTION_FROM_DEVICE);

    if (dma_has_iommu(dma)) {
        return iommu_dma_memory_set(dma, addr, c, len);
    }
    do_dma_memory_set(addr, c, len);

    return 0;
}
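
/*
 * QEMUSGList construction helpers.  The entry array starts at
 * alloc_hint entries and grows geometrically (2n + 1) on demand, so
 * repeated qemu_sglist_add() calls stay amortised O(1).
 */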
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->dma = dma;
}

void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    g_free(qsg->sg);
}
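
/*
 * State of one scatter/gather DMA request in flight: the underlying
 * block-layer AIOCB, the current position within the QEMUSGList, and
 * the QEMUIOVector of mappings for the chunk currently transferring.
 */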
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    bool in_cancel;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}
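
/*
 * Invoked once mapping resources become available again after a failed
 * dma_memory_map() attempt; resume the transfer from a bottom half
 * rather than directly from the map-client callback.
 */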
static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->dma, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel. */
        qemu_aio_release(dbs);
    }
}
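
/*
 * Core of the transfer: each invocation unmaps the chunk completed so
 * far, then walks the scatter/gather list mapping as many entries as
 * dma_memory_map() will give us, and submits one block AIO request for
 * that chunk.  If nothing at all could be mapped, register a map
 * client and retry once mapping resources free up.
 */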
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->dma, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}
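
/*
 * Cancel an in-flight request.  The in_cancel flag keeps dma_complete()
 * from releasing the AIOCB underneath us if the request completes
 * during bdrv_aio_cancel(); the callback is cleared so the guest never
 * sees a completion for a cancelled transfer.
 */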
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}

static AIOPool dma_aio_pool = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};
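
/*
 * Set up and kick off a scatter/gather block transfer.  io_func is the
 * vectored block-layer routine to drive (bdrv_aio_readv or
 * bdrv_aio_writev via the wrappers below); the first chunk is
 * submitted immediately from dma_bdrv_cb().
 */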
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}

BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}
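
/*
 * Copy between a linear buffer and a scatter/gather list, in either
 * direction, returning the number of bytes of the list left untouched
 * (the residual).
 */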
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->dma, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}

void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}
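
/*
 * Check that an entire DMA range translates successfully in the given
 * direction, without actually touching guest memory.  A single
 * translation may cover less than the whole range, so loop until every
 * byte has been validated.
 */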
bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
                            DMADirection dir)
{
    target_phys_addr_t paddr, plen;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_check context=%p addr=0x" DMA_ADDR_FMT
            " len=0x" DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
#endif

    while (len) {
        if (dma->translate(dma, addr, &paddr, &plen, dir) != 0) {
            return false;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        len -= plen;
        addr += plen;
    }

    return true;
}
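
/*
 * IOMMU-mediated read/write: translate the DMA address one region at a
 * time and copy through cpu_physical_memory_rw().  See the error path
 * below for why the buffer is cleared on a failed read.
 */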
int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                        void *buf, dma_addr_t len, DMADirection dir)
{
    target_phys_addr_t paddr, plen;
    int err;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_rw context=%p addr=0x" DMA_ADDR_FMT " len=0x"
            DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
#endif

    while (len) {
        err = dma->translate(dma, addr, &paddr, &plen, dir);
        if (err) {
            /*
             * In case of failure on reads from the guest, we clean the
             * destination buffer so that a device that doesn't test
             * for errors will not expose qemu internal memory.
             */
            memset(buf, 0, len);
            return -1;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        cpu_physical_memory_rw(paddr, buf, plen,
                               dir == DMA_DIRECTION_FROM_DEVICE);

        len -= plen;
        addr += plen;
        buf += plen;
    }

    return 0;
}

int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
                         dma_addr_t len)
{
    target_phys_addr_t paddr, plen;
    int err;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_set context=%p addr=0x" DMA_ADDR_FMT
            " len=0x" DMA_ADDR_FMT "\n", dma, addr, len);
#endif

    while (len) {
        err = dma->translate(dma, addr, &paddr, &plen,
                             DMA_DIRECTION_FROM_DEVICE);
        if (err) {
            return err;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        do_dma_memory_set(paddr, c, plen);

        len -= plen;
        addr += plen;
    }

    return 0;
}

void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
                      DMAMapFunc map, DMAUnmapFunc unmap)
{
#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_context_init(%p, %p, %p, %p)\n",
            dma, translate, map, unmap);
#endif
    dma->translate = translate;
    dma->map = map;
    dma->unmap = unmap;
}
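
/*
 * Map a DMA region for direct access.  If the context provides its own
 * map callback, defer to it; otherwise translate the first region and
 * hand it to cpu_physical_memory_map().  Only the first contiguous
 * physical region is mapped, so *len may come back shorter than
 * requested and the caller must loop.
 */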
void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
                           DMADirection dir)
{
    int err;
    target_phys_addr_t paddr, plen;
    void *buf;

    if (dma->map) {
        return dma->map(dma, addr, len, dir);
    }

    plen = *len;
    err = dma->translate(dma, addr, &paddr, &plen, dir);
    if (err) {
        return NULL;
    }

    /*
     * If this is true, the virtual region is contiguous,
     * but the translated physical region isn't. We just
     * clamp *len, much like cpu_physical_memory_map() does.
     */
    if (plen < *len) {
        *len = plen;
    }

    buf = cpu_physical_memory_map(paddr, &plen,
                                  dir == DMA_DIRECTION_FROM_DEVICE);
    *len = plen;

    return buf;
}

void iommu_dma_memory_unmap(DMAContext *dma, void *buffer, dma_addr_t len,
                            DMADirection dir, dma_addr_t access_len)
{
    if (dma->unmap) {
        dma->unmap(dma, buffer, len, dir, access_len);
        return;
    }

    cpu_physical_memory_unmap(buffer, len,
                              dir == DMA_DIRECTION_FROM_DEVICE,
                              access_len);
}