root / dma-helpers.c @ ea8d82a1
History | View | Annotate | Download (10.9 kB)
1 | 244ab90e | aliguori | /*
|
---|---|---|---|
2 | 244ab90e | aliguori | * DMA helper functions
|
3 | 244ab90e | aliguori | *
|
4 | 244ab90e | aliguori | * Copyright (c) 2009 Red Hat
|
5 | 244ab90e | aliguori | *
|
6 | 244ab90e | aliguori | * This work is licensed under the terms of the GNU General Public License
|
7 | 244ab90e | aliguori | * (GNU GPL), version 2 or later.
|
8 | 244ab90e | aliguori | */
|
9 | 244ab90e | aliguori | |
10 | 244ab90e | aliguori | #include "dma.h" |
11 | c57c4658 | Kevin Wolf | #include "trace.h" |
12 | e5332e63 | David Gibson | #include "range.h" |
13 | e5332e63 | David Gibson | #include "qemu-thread.h" |
14 | 244ab90e | aliguori | |
15 | e5332e63 | David Gibson | /* #define DEBUG_IOMMU */
|
16 | e5332e63 | David Gibson | |
17 | e5332e63 | David Gibson | static void do_dma_memory_set(dma_addr_t addr, uint8_t c, dma_addr_t len) |
18 | d86a77f8 | David Gibson | { |
19 | d86a77f8 | David Gibson | #define FILLBUF_SIZE 512 |
20 | d86a77f8 | David Gibson | uint8_t fillbuf[FILLBUF_SIZE]; |
21 | d86a77f8 | David Gibson | int l;
|
22 | d86a77f8 | David Gibson | |
23 | d86a77f8 | David Gibson | memset(fillbuf, c, FILLBUF_SIZE); |
24 | d86a77f8 | David Gibson | while (len > 0) { |
25 | d86a77f8 | David Gibson | l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE; |
26 | d86a77f8 | David Gibson | cpu_physical_memory_rw(addr, fillbuf, l, true);
|
27 | d86a77f8 | David Gibson | len -= len; |
28 | d86a77f8 | David Gibson | addr += len; |
29 | d86a77f8 | David Gibson | } |
30 | e5332e63 | David Gibson | } |
31 | e5332e63 | David Gibson | |
32 | e5332e63 | David Gibson | int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
|
33 | e5332e63 | David Gibson | { |
34 | 7a0bac4d | Benjamin Herrenschmidt | dma_barrier(dma, DMA_DIRECTION_FROM_DEVICE); |
35 | 7a0bac4d | Benjamin Herrenschmidt | |
36 | e5332e63 | David Gibson | if (dma_has_iommu(dma)) {
|
37 | e5332e63 | David Gibson | return iommu_dma_memory_set(dma, addr, c, len);
|
38 | e5332e63 | David Gibson | } |
39 | e5332e63 | David Gibson | do_dma_memory_set(addr, c, len); |
40 | e5332e63 | David Gibson | |
41 | d86a77f8 | David Gibson | return 0; |
42 | d86a77f8 | David Gibson | } |
43 | d86a77f8 | David Gibson | |
44 | c65bcef3 | David Gibson | void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma) |
45 | 244ab90e | aliguori | { |
46 | 7267c094 | Anthony Liguori | qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
|
47 | 244ab90e | aliguori | qsg->nsg = 0;
|
48 | 244ab90e | aliguori | qsg->nalloc = alloc_hint; |
49 | 244ab90e | aliguori | qsg->size = 0;
|
50 | c65bcef3 | David Gibson | qsg->dma = dma; |
51 | 244ab90e | aliguori | } |
52 | 244ab90e | aliguori | |
53 | d3231181 | David Gibson | void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
|
54 | 244ab90e | aliguori | { |
55 | 244ab90e | aliguori | if (qsg->nsg == qsg->nalloc) {
|
56 | 244ab90e | aliguori | qsg->nalloc = 2 * qsg->nalloc + 1; |
57 | 7267c094 | Anthony Liguori | qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
|
58 | 244ab90e | aliguori | } |
59 | 244ab90e | aliguori | qsg->sg[qsg->nsg].base = base; |
60 | 244ab90e | aliguori | qsg->sg[qsg->nsg].len = len; |
61 | 244ab90e | aliguori | qsg->size += len; |
62 | 244ab90e | aliguori | ++qsg->nsg; |
63 | 244ab90e | aliguori | } |
64 | 244ab90e | aliguori | |
65 | 244ab90e | aliguori | void qemu_sglist_destroy(QEMUSGList *qsg)
|
66 | 244ab90e | aliguori | { |
67 | 7267c094 | Anthony Liguori | g_free(qsg->sg); |
68 | ea8d82a1 | Jason Baron | memset(qsg, 0, sizeof(*qsg)); |
69 | 244ab90e | aliguori | } |
70 | 244ab90e | aliguori | |
/* Per-request state for a scatter/gather block-device AIO transfer. */
typedef struct {
    BlockDriverAIOCB common;    /* embedded base AIOCB; must stay first */
    BlockDriverState *bs;       /* backing block device */
    BlockDriverAIOCB *acb;      /* outstanding sub-request, or NULL */
    QEMUSGList *sg;             /* guest scatter/gather list being moved */
    uint64_t sector_num;        /* next sector to transfer */
    DMADirection dir;           /* transfer direction, device point of view */
    bool in_cancel;             /* set while dma_aio_cancel() runs, so a
                                 * completing request does not release us */
    int sg_cur_index;           /* index of the current sg entry */
    dma_addr_t sg_cur_byte;     /* byte offset inside the current sg entry */
    QEMUIOVector iov;           /* host iovec built from mapped sg chunks */
    QEMUBH *bh;                 /* bottom half used to retry after a map
                                 * failure (see continue_after_map_failure) */
    DMAIOFunc *io_func;         /* actual I/O routine, e.g. bdrv_aio_readv
                                 * or bdrv_aio_writev */
} DMAAIOCB;
85 | 59a703eb | aliguori | |
86 | 59a703eb | aliguori | static void dma_bdrv_cb(void *opaque, int ret); |
87 | 59a703eb | aliguori | |
88 | 59a703eb | aliguori | static void reschedule_dma(void *opaque) |
89 | 59a703eb | aliguori | { |
90 | 37b7842c | aliguori | DMAAIOCB *dbs = (DMAAIOCB *)opaque; |
91 | 59a703eb | aliguori | |
92 | 59a703eb | aliguori | qemu_bh_delete(dbs->bh); |
93 | 59a703eb | aliguori | dbs->bh = NULL;
|
94 | c3adb5b9 | Paolo Bonzini | dma_bdrv_cb(dbs, 0);
|
95 | 59a703eb | aliguori | } |
96 | 59a703eb | aliguori | |
97 | 59a703eb | aliguori | static void continue_after_map_failure(void *opaque) |
98 | 59a703eb | aliguori | { |
99 | 37b7842c | aliguori | DMAAIOCB *dbs = (DMAAIOCB *)opaque; |
100 | 59a703eb | aliguori | |
101 | 59a703eb | aliguori | dbs->bh = qemu_bh_new(reschedule_dma, dbs); |
102 | 59a703eb | aliguori | qemu_bh_schedule(dbs->bh); |
103 | 59a703eb | aliguori | } |
104 | 59a703eb | aliguori | |
105 | 7403b14e | aliguori | static void dma_bdrv_unmap(DMAAIOCB *dbs) |
106 | 59a703eb | aliguori | { |
107 | 59a703eb | aliguori | int i;
|
108 | 59a703eb | aliguori | |
109 | 59a703eb | aliguori | for (i = 0; i < dbs->iov.niov; ++i) { |
110 | c65bcef3 | David Gibson | dma_memory_unmap(dbs->sg->dma, dbs->iov.iov[i].iov_base, |
111 | c65bcef3 | David Gibson | dbs->iov.iov[i].iov_len, dbs->dir, |
112 | c65bcef3 | David Gibson | dbs->iov.iov[i].iov_len); |
113 | 59a703eb | aliguori | } |
114 | c3adb5b9 | Paolo Bonzini | qemu_iovec_reset(&dbs->iov); |
115 | c3adb5b9 | Paolo Bonzini | } |
116 | c3adb5b9 | Paolo Bonzini | |
/* Finish the whole DMA request: unmap guest memory, invoke the user's
 * completion callback with @ret, tear down the iovec/BH, and release
 * the AIOCB unless a cancellation is still holding a reference. */
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    /* The callback may have been cleared by dma_aio_cancel(). */
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel. */
        qemu_aio_release(dbs);
    }
}
137 | 7403b14e | aliguori | |
/* Core transfer loop; also serves as the completion callback of each
 * sub-request.  Unmaps what the last round transferred, then maps as
 * much of the remaining scatter/gather list as possible into dbs->iov
 * and issues the next read/write via dbs->io_func.  Completes the
 * request when the list is exhausted or a sub-request failed; parks
 * itself with cpu_register_map_client() when nothing could be mapped. */
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    /* Advance past the sectors transferred in the previous round
     * (iov.size is zero on the first call). */
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    /* Map as many sg entries (or partial entries) as possible. */
    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        /* cur_len may come back smaller than requested. */
        mem = dma_memory_map(dbs->sg->dma, cur_addr, &cur_len, dbs->dir);
        if (!mem)
            break;
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        /* Nothing could be mapped at all: wait until some bounce
         * buffers are freed, then retry via reschedule_dma. */
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}
179 | 59a703eb | aliguori | |
/* AIOPool .cancel hook: cancel any outstanding sub-request, then
 * complete the DMA request without invoking the user's callback. */
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        /* in_cancel keeps dbs alive if the sub-request completes
         * (and calls dma_complete) inside bdrv_aio_cancel. */
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    /* Suppress the user callback: a cancelled request must not report
     * a result. */
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}
196 | c16b5a2c | Christoph Hellwig | |
/* AIO pool descriptor for scatter/gather DMA requests. */
static AIOPool dma_aio_pool = {
    .aiocb_size = sizeof(DMAAIOCB),
    .cancel = dma_aio_cancel,
};
201 | c16b5a2c | Christoph Hellwig | |
202 | cb144ccb | Christoph Hellwig | BlockDriverAIOCB *dma_bdrv_io( |
203 | 59a703eb | aliguori | BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num, |
204 | cb144ccb | Christoph Hellwig | DMAIOFunc *io_func, BlockDriverCompletionFunc *cb, |
205 | 43cf8ae6 | David Gibson | void *opaque, DMADirection dir)
|
206 | 59a703eb | aliguori | { |
207 | cb144ccb | Christoph Hellwig | DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque); |
208 | 59a703eb | aliguori | |
209 | 43cf8ae6 | David Gibson | trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE)); |
210 | c57c4658 | Kevin Wolf | |
211 | 37b7842c | aliguori | dbs->acb = NULL;
|
212 | 59a703eb | aliguori | dbs->bs = bs; |
213 | 59a703eb | aliguori | dbs->sg = sg; |
214 | 59a703eb | aliguori | dbs->sector_num = sector_num; |
215 | 59a703eb | aliguori | dbs->sg_cur_index = 0;
|
216 | 59a703eb | aliguori | dbs->sg_cur_byte = 0;
|
217 | 43cf8ae6 | David Gibson | dbs->dir = dir; |
218 | cb144ccb | Christoph Hellwig | dbs->io_func = io_func; |
219 | 59a703eb | aliguori | dbs->bh = NULL;
|
220 | 59a703eb | aliguori | qemu_iovec_init(&dbs->iov, sg->nsg); |
221 | 59a703eb | aliguori | dma_bdrv_cb(dbs, 0);
|
222 | 37b7842c | aliguori | return &dbs->common;
|
223 | 59a703eb | aliguori | } |
224 | 59a703eb | aliguori | |
225 | 59a703eb | aliguori | |
/* Convenience wrapper: scatter/gather read from @bs into @sg starting
 * at @sector (data flows from the disk to the device/guest memory). */
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}
233 | 59a703eb | aliguori | |
/* Convenience wrapper: scatter/gather write of @sg to @bs starting at
 * @sector (data flows from the device/guest memory to the disk). */
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}
241 | 8171ee35 | Paolo Bonzini | |
242 | 8171ee35 | Paolo Bonzini | |
243 | c65bcef3 | David Gibson | static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
|
244 | c65bcef3 | David Gibson | DMADirection dir) |
245 | 8171ee35 | Paolo Bonzini | { |
246 | 8171ee35 | Paolo Bonzini | uint64_t resid; |
247 | 8171ee35 | Paolo Bonzini | int sg_cur_index;
|
248 | 8171ee35 | Paolo Bonzini | |
249 | 8171ee35 | Paolo Bonzini | resid = sg->size; |
250 | 8171ee35 | Paolo Bonzini | sg_cur_index = 0;
|
251 | 8171ee35 | Paolo Bonzini | len = MIN(len, resid); |
252 | 8171ee35 | Paolo Bonzini | while (len > 0) { |
253 | 8171ee35 | Paolo Bonzini | ScatterGatherEntry entry = sg->sg[sg_cur_index++]; |
254 | 8171ee35 | Paolo Bonzini | int32_t xfer = MIN(len, entry.len); |
255 | c65bcef3 | David Gibson | dma_memory_rw(sg->dma, entry.base, ptr, xfer, dir); |
256 | 8171ee35 | Paolo Bonzini | ptr += xfer; |
257 | 8171ee35 | Paolo Bonzini | len -= xfer; |
258 | 8171ee35 | Paolo Bonzini | resid -= xfer; |
259 | 8171ee35 | Paolo Bonzini | } |
260 | 8171ee35 | Paolo Bonzini | |
261 | 8171ee35 | Paolo Bonzini | return resid;
|
262 | 8171ee35 | Paolo Bonzini | } |
263 | 8171ee35 | Paolo Bonzini | |
/* Copy up to @len bytes from @ptr into the guest regions described by
 * @sg.  Returns the residual (bytes of @sg not filled). */
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}
268 | 8171ee35 | Paolo Bonzini | |
/* Copy up to @len bytes from the guest regions described by @sg into
 * @ptr.  Returns the residual (bytes of @sg not consumed). */
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}
273 | 84a69356 | Paolo Bonzini | |
/* Begin block-layer accounting for a transfer covering all of @sg. */
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}
279 | e5332e63 | David Gibson | |
280 | e5332e63 | David Gibson | bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
|
281 | e5332e63 | David Gibson | DMADirection dir) |
282 | e5332e63 | David Gibson | { |
283 | e5332e63 | David Gibson | target_phys_addr_t paddr, plen; |
284 | e5332e63 | David Gibson | |
285 | e5332e63 | David Gibson | #ifdef DEBUG_IOMMU
|
286 | e5332e63 | David Gibson | fprintf(stderr, "dma_memory_check context=%p addr=0x" DMA_ADDR_FMT
|
287 | e5332e63 | David Gibson | " len=0x" DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir); |
288 | e5332e63 | David Gibson | #endif
|
289 | e5332e63 | David Gibson | |
290 | e5332e63 | David Gibson | while (len) {
|
291 | e5332e63 | David Gibson | if (dma->translate(dma, addr, &paddr, &plen, dir) != 0) { |
292 | e5332e63 | David Gibson | return false; |
293 | e5332e63 | David Gibson | } |
294 | e5332e63 | David Gibson | |
295 | e5332e63 | David Gibson | /* The translation might be valid for larger regions. */
|
296 | e5332e63 | David Gibson | if (plen > len) {
|
297 | e5332e63 | David Gibson | plen = len; |
298 | e5332e63 | David Gibson | } |
299 | e5332e63 | David Gibson | |
300 | e5332e63 | David Gibson | len -= plen; |
301 | e5332e63 | David Gibson | addr += plen; |
302 | e5332e63 | David Gibson | } |
303 | e5332e63 | David Gibson | |
304 | e5332e63 | David Gibson | return true; |
305 | e5332e63 | David Gibson | } |
306 | e5332e63 | David Gibson | |
/* Copy @len bytes between @buf and the IOMMU-translated guest range
 * starting at @addr; @dir is the direction from the device's point of
 * view.  Returns 0 on success, -1 if any part of the range fails to
 * translate (in which case the untransferred tail of @buf is zeroed). */
int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                        void *buf, dma_addr_t len, DMADirection dir)
{
    target_phys_addr_t paddr, plen;
    int err;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_rw context=%p addr=0x" DMA_ADDR_FMT " len=0x"
            DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
#endif

    while (len) {
        /* Translate one contiguous chunk at a time; plen is set to the
         * size the translation covers. */
        err = dma->translate(dma, addr, &paddr, &plen, dir);
        if (err) {
            /*
             * In case of failure on reads from the guest, we clean the
             * destination buffer so that a device that doesn't test
             * for errors will not expose qemu internal memory.
             */
            memset(buf, 0, len);
            return -1;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        cpu_physical_memory_rw(paddr, buf, plen,
                               dir == DMA_DIRECTION_FROM_DEVICE);

        /* Advance by the chunk just transferred. */
        len -= plen;
        addr += plen;
        buf += plen;
    }

    return 0;
}
345 | e5332e63 | David Gibson | |
346 | e5332e63 | David Gibson | int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
|
347 | e5332e63 | David Gibson | dma_addr_t len) |
348 | e5332e63 | David Gibson | { |
349 | e5332e63 | David Gibson | target_phys_addr_t paddr, plen; |
350 | e5332e63 | David Gibson | int err;
|
351 | e5332e63 | David Gibson | |
352 | e5332e63 | David Gibson | #ifdef DEBUG_IOMMU
|
353 | e5332e63 | David Gibson | fprintf(stderr, "dma_memory_set context=%p addr=0x" DMA_ADDR_FMT
|
354 | e5332e63 | David Gibson | " len=0x" DMA_ADDR_FMT "\n", dma, addr, len); |
355 | e5332e63 | David Gibson | #endif
|
356 | e5332e63 | David Gibson | |
357 | e5332e63 | David Gibson | while (len) {
|
358 | e5332e63 | David Gibson | err = dma->translate(dma, addr, &paddr, &plen, |
359 | e5332e63 | David Gibson | DMA_DIRECTION_FROM_DEVICE); |
360 | e5332e63 | David Gibson | if (err) {
|
361 | e5332e63 | David Gibson | return err;
|
362 | e5332e63 | David Gibson | } |
363 | e5332e63 | David Gibson | |
364 | e5332e63 | David Gibson | /* The translation might be valid for larger regions. */
|
365 | e5332e63 | David Gibson | if (plen > len) {
|
366 | e5332e63 | David Gibson | plen = len; |
367 | e5332e63 | David Gibson | } |
368 | e5332e63 | David Gibson | |
369 | e5332e63 | David Gibson | do_dma_memory_set(paddr, c, plen); |
370 | e5332e63 | David Gibson | |
371 | e5332e63 | David Gibson | len -= plen; |
372 | e5332e63 | David Gibson | addr += plen; |
373 | e5332e63 | David Gibson | } |
374 | e5332e63 | David Gibson | |
375 | e5332e63 | David Gibson | return 0; |
376 | e5332e63 | David Gibson | } |
377 | e5332e63 | David Gibson | |
378 | e5332e63 | David Gibson | void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
|
379 | e5332e63 | David Gibson | DMAMapFunc map, DMAUnmapFunc unmap) |
380 | e5332e63 | David Gibson | { |
381 | e5332e63 | David Gibson | #ifdef DEBUG_IOMMU
|
382 | e5332e63 | David Gibson | fprintf(stderr, "dma_context_init(%p, %p, %p, %p)\n",
|
383 | e5332e63 | David Gibson | dma, translate, map, unmap); |
384 | e5332e63 | David Gibson | #endif
|
385 | e5332e63 | David Gibson | dma->translate = translate; |
386 | e5332e63 | David Gibson | dma->map = map; |
387 | e5332e63 | David Gibson | dma->unmap = unmap; |
388 | e5332e63 | David Gibson | } |
389 | e5332e63 | David Gibson | |
/* Map a guest DMA region at @addr for direct host access in direction
 * @dir.  Delegates to the context's custom map hook when one is set;
 * otherwise translates a single contiguous chunk and maps it with
 * cpu_physical_memory_map().  On return *len may be smaller than
 * requested.  Returns a host pointer, or NULL on translation failure. */
void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
                           DMADirection dir)
{
    int err;
    target_phys_addr_t paddr, plen;
    void *buf;

    if (dma->map) {
        return dma->map(dma, addr, len, dir);
    }

    plen = *len;
    err = dma->translate(dma, addr, &paddr, &plen, dir);
    if (err) {
        return NULL;
    }

    /*
     * If this is true, the virtual region is contiguous,
     * but the translated physical region isn't. We just
     * clamp *len, much like cpu_physical_memory_map() does.
     */
    if (plen < *len) {
        *len = plen;
    }

    /* cpu_physical_memory_map may itself shrink plen further. */
    buf = cpu_physical_memory_map(paddr, &plen,
                                  dir == DMA_DIRECTION_FROM_DEVICE);
    *len = plen;

    return buf;
}
422 | e5332e63 | David Gibson | |
423 | e5332e63 | David Gibson | void iommu_dma_memory_unmap(DMAContext *dma, void *buffer, dma_addr_t len, |
424 | e5332e63 | David Gibson | DMADirection dir, dma_addr_t access_len) |
425 | e5332e63 | David Gibson | { |
426 | e5332e63 | David Gibson | if (dma->unmap) {
|
427 | e5332e63 | David Gibson | dma->unmap(dma, buffer, len, dir, access_len); |
428 | e5332e63 | David Gibson | return;
|
429 | e5332e63 | David Gibson | } |
430 | e5332e63 | David Gibson | |
431 | e5332e63 | David Gibson | cpu_physical_memory_unmap(buffer, len, |
432 | e5332e63 | David Gibson | dir == DMA_DIRECTION_FROM_DEVICE, |
433 | e5332e63 | David Gibson | access_len); |
434 | e5332e63 | David Gibson | |
435 | e5332e63 | David Gibson | } |