root / dma-helpers.c @ feature-archipelago
History | View | Annotate | Download (7 kB)
1 | 244ab90e | aliguori | /*
|
---|---|---|---|
2 | 244ab90e | aliguori | * DMA helper functions
|
3 | 244ab90e | aliguori | *
|
4 | 244ab90e | aliguori | * Copyright (c) 2009 Red Hat
|
5 | 244ab90e | aliguori | *
|
6 | 244ab90e | aliguori | * This work is licensed under the terms of the GNU General Public License
|
7 | 244ab90e | aliguori | * (GNU GPL), version 2 or later.
|
8 | 244ab90e | aliguori | */
|
9 | 244ab90e | aliguori | |
10 | 9c17d615 | Paolo Bonzini | #include "sysemu/dma.h" |
11 | c57c4658 | Kevin Wolf | #include "trace.h" |
12 | 1de7afc9 | Paolo Bonzini | #include "qemu/range.h" |
13 | 1de7afc9 | Paolo Bonzini | #include "qemu/thread.h" |
14 | 6a1751b7 | Alex Bligh | #include "qemu/main-loop.h" |
15 | 244ab90e | aliguori | |
16 | e5332e63 | David Gibson | /* #define DEBUG_IOMMU */
|
17 | e5332e63 | David Gibson | |
18 | df32fd1c | Paolo Bonzini | int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len)
|
19 | d86a77f8 | David Gibson | { |
20 | df32fd1c | Paolo Bonzini | dma_barrier(as, DMA_DIRECTION_FROM_DEVICE); |
21 | 24addbc7 | Paolo Bonzini | |
22 | d86a77f8 | David Gibson | #define FILLBUF_SIZE 512 |
23 | d86a77f8 | David Gibson | uint8_t fillbuf[FILLBUF_SIZE]; |
24 | d86a77f8 | David Gibson | int l;
|
25 | 24addbc7 | Paolo Bonzini | bool error = false; |
26 | d86a77f8 | David Gibson | |
27 | d86a77f8 | David Gibson | memset(fillbuf, c, FILLBUF_SIZE); |
28 | d86a77f8 | David Gibson | while (len > 0) { |
29 | d86a77f8 | David Gibson | l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE; |
30 | 24addbc7 | Paolo Bonzini | error |= address_space_rw(as, addr, fillbuf, l, true);
|
31 | bc9b78de | Benjamin Herrenschmidt | len -= l; |
32 | bc9b78de | Benjamin Herrenschmidt | addr += l; |
33 | d86a77f8 | David Gibson | } |
34 | e5332e63 | David Gibson | |
35 | 24addbc7 | Paolo Bonzini | return error;
|
36 | d86a77f8 | David Gibson | } |
37 | d86a77f8 | David Gibson | |
38 | f487b677 | Paolo Bonzini | void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint, |
39 | f487b677 | Paolo Bonzini | AddressSpace *as) |
40 | 244ab90e | aliguori | { |
41 | 7267c094 | Anthony Liguori | qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
|
42 | 244ab90e | aliguori | qsg->nsg = 0;
|
43 | 244ab90e | aliguori | qsg->nalloc = alloc_hint; |
44 | 244ab90e | aliguori | qsg->size = 0;
|
45 | df32fd1c | Paolo Bonzini | qsg->as = as; |
46 | f487b677 | Paolo Bonzini | qsg->dev = dev; |
47 | f487b677 | Paolo Bonzini | object_ref(OBJECT(dev)); |
48 | 244ab90e | aliguori | } |
49 | 244ab90e | aliguori | |
50 | d3231181 | David Gibson | void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
|
51 | 244ab90e | aliguori | { |
52 | 244ab90e | aliguori | if (qsg->nsg == qsg->nalloc) {
|
53 | 244ab90e | aliguori | qsg->nalloc = 2 * qsg->nalloc + 1; |
54 | 7267c094 | Anthony Liguori | qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
|
55 | 244ab90e | aliguori | } |
56 | 244ab90e | aliguori | qsg->sg[qsg->nsg].base = base; |
57 | 244ab90e | aliguori | qsg->sg[qsg->nsg].len = len; |
58 | 244ab90e | aliguori | qsg->size += len; |
59 | 244ab90e | aliguori | ++qsg->nsg; |
60 | 244ab90e | aliguori | } |
61 | 244ab90e | aliguori | |
/*
 * Release the resources held by @qsg: drop the device reference taken in
 * qemu_sglist_init(), free the entry array, and zero the struct so a
 * stale pointer cannot be mistaken for a live list.
 */
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    object_unref(OBJECT(qsg->dev));
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}
68 | 244ab90e | aliguori | |
/* State for one in-flight scatter/gather block I/O request. */
typedef struct {
    BlockDriverAIOCB common;    /* public handle returned to the caller */
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;      /* current sub-request; NULL while mapping */
    QEMUSGList *sg;             /* guest scatter/gather list being served */
    uint64_t sector_num;        /* next sector to transfer */
    DMADirection dir;
    bool in_cancel;             /* set while dma_aio_cancel() is running, so
                                 * dma_complete() does not release the AIOCB */
    int sg_cur_index;           /* current entry in sg->sg[] */
    dma_addr_t sg_cur_byte;     /* offset already consumed within that entry */
    QEMUIOVector iov;           /* host mappings for the current chunk */
    QEMUBH *bh;                 /* retry BH after a failed dma_memory_map() */
    DMAIOFunc *io_func;         /* bdrv_aio_readv or bdrv_aio_writev */
} DMAAIOCB;
83 | 59a703eb | aliguori | |
84 | 59a703eb | aliguori | static void dma_bdrv_cb(void *opaque, int ret); |
85 | 59a703eb | aliguori | |
86 | 59a703eb | aliguori | static void reschedule_dma(void *opaque) |
87 | 59a703eb | aliguori | { |
88 | 37b7842c | aliguori | DMAAIOCB *dbs = (DMAAIOCB *)opaque; |
89 | 59a703eb | aliguori | |
90 | 59a703eb | aliguori | qemu_bh_delete(dbs->bh); |
91 | 59a703eb | aliguori | dbs->bh = NULL;
|
92 | c3adb5b9 | Paolo Bonzini | dma_bdrv_cb(dbs, 0);
|
93 | 59a703eb | aliguori | } |
94 | 59a703eb | aliguori | |
95 | 59a703eb | aliguori | static void continue_after_map_failure(void *opaque) |
96 | 59a703eb | aliguori | { |
97 | 37b7842c | aliguori | DMAAIOCB *dbs = (DMAAIOCB *)opaque; |
98 | 59a703eb | aliguori | |
99 | 59a703eb | aliguori | dbs->bh = qemu_bh_new(reschedule_dma, dbs); |
100 | 59a703eb | aliguori | qemu_bh_schedule(dbs->bh); |
101 | 59a703eb | aliguori | } |
102 | 59a703eb | aliguori | |
103 | 7403b14e | aliguori | static void dma_bdrv_unmap(DMAAIOCB *dbs) |
104 | 59a703eb | aliguori | { |
105 | 59a703eb | aliguori | int i;
|
106 | 59a703eb | aliguori | |
107 | 59a703eb | aliguori | for (i = 0; i < dbs->iov.niov; ++i) { |
108 | df32fd1c | Paolo Bonzini | dma_memory_unmap(dbs->sg->as, dbs->iov.iov[i].iov_base, |
109 | c65bcef3 | David Gibson | dbs->iov.iov[i].iov_len, dbs->dir, |
110 | c65bcef3 | David Gibson | dbs->iov.iov[i].iov_len); |
111 | 59a703eb | aliguori | } |
112 | c3adb5b9 | Paolo Bonzini | qemu_iovec_reset(&dbs->iov); |
113 | c3adb5b9 | Paolo Bonzini | } |
114 | c3adb5b9 | Paolo Bonzini | |
/*
 * Finish the request with result @ret: unmap guest memory, run the
 * caller's completion callback, and tear down per-request state.
 * The order matters: the callback must see the data already unmapped,
 * and the AIOCB must stay alive while dma_aio_cancel() references it.
 */
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        /* A retry BH may still be pending if we were cancelled. */
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel. */
        qemu_aio_release(dbs);
    }
}
135 | 7403b14e | aliguori | |
136 | 856ae5c3 | blueswir1 | static void dma_bdrv_cb(void *opaque, int ret) |
137 | 7403b14e | aliguori | { |
138 | 7403b14e | aliguori | DMAAIOCB *dbs = (DMAAIOCB *)opaque; |
139 | c65bcef3 | David Gibson | dma_addr_t cur_addr, cur_len; |
140 | 7403b14e | aliguori | void *mem;
|
141 | 7403b14e | aliguori | |
142 | c57c4658 | Kevin Wolf | trace_dma_bdrv_cb(dbs, ret); |
143 | c57c4658 | Kevin Wolf | |
144 | 7403b14e | aliguori | dbs->acb = NULL;
|
145 | 7403b14e | aliguori | dbs->sector_num += dbs->iov.size / 512;
|
146 | 7403b14e | aliguori | dma_bdrv_unmap(dbs); |
147 | 59a703eb | aliguori | |
148 | 59a703eb | aliguori | if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) { |
149 | c3adb5b9 | Paolo Bonzini | dma_complete(dbs, ret); |
150 | 59a703eb | aliguori | return;
|
151 | 59a703eb | aliguori | } |
152 | 59a703eb | aliguori | |
153 | 59a703eb | aliguori | while (dbs->sg_cur_index < dbs->sg->nsg) {
|
154 | 59a703eb | aliguori | cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte; |
155 | 59a703eb | aliguori | cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte; |
156 | df32fd1c | Paolo Bonzini | mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir); |
157 | 59a703eb | aliguori | if (!mem)
|
158 | 59a703eb | aliguori | break;
|
159 | 59a703eb | aliguori | qemu_iovec_add(&dbs->iov, mem, cur_len); |
160 | 59a703eb | aliguori | dbs->sg_cur_byte += cur_len; |
161 | 59a703eb | aliguori | if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
|
162 | 59a703eb | aliguori | dbs->sg_cur_byte = 0;
|
163 | 59a703eb | aliguori | ++dbs->sg_cur_index; |
164 | 59a703eb | aliguori | } |
165 | 59a703eb | aliguori | } |
166 | 59a703eb | aliguori | |
167 | 59a703eb | aliguori | if (dbs->iov.size == 0) { |
168 | c57c4658 | Kevin Wolf | trace_dma_map_wait(dbs); |
169 | 59a703eb | aliguori | cpu_register_map_client(dbs, continue_after_map_failure); |
170 | 59a703eb | aliguori | return;
|
171 | 59a703eb | aliguori | } |
172 | 59a703eb | aliguori | |
173 | cb144ccb | Christoph Hellwig | dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov, |
174 | cb144ccb | Christoph Hellwig | dbs->iov.size / 512, dma_bdrv_cb, dbs);
|
175 | 6bee44ea | Paolo Bonzini | assert(dbs->acb); |
176 | 59a703eb | aliguori | } |
177 | 59a703eb | aliguori | |
/*
 * .cancel hook: synchronously cancel an in-flight DMA request.
 * in_cancel guards against dma_complete() (reached via the cancelled
 * sub-request's callback) releasing the AIOCB while we still use it;
 * clearing common.cb ensures the guest callback is not invoked for a
 * cancelled request.
 */
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}
194 | c16b5a2c | Christoph Hellwig | |
/* AIOCB type descriptor for DMA requests; wires in the cancel hook. */
static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size = sizeof(DMAAIOCB),
    .cancel = dma_aio_cancel,
};
199 | c16b5a2c | Christoph Hellwig | |
200 | cb144ccb | Christoph Hellwig | BlockDriverAIOCB *dma_bdrv_io( |
201 | 59a703eb | aliguori | BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num, |
202 | cb144ccb | Christoph Hellwig | DMAIOFunc *io_func, BlockDriverCompletionFunc *cb, |
203 | 43cf8ae6 | David Gibson | void *opaque, DMADirection dir)
|
204 | 59a703eb | aliguori | { |
205 | d7331bed | Stefan Hajnoczi | DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, bs, cb, opaque); |
206 | 59a703eb | aliguori | |
207 | 43cf8ae6 | David Gibson | trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE)); |
208 | c57c4658 | Kevin Wolf | |
209 | 37b7842c | aliguori | dbs->acb = NULL;
|
210 | 59a703eb | aliguori | dbs->bs = bs; |
211 | 59a703eb | aliguori | dbs->sg = sg; |
212 | 59a703eb | aliguori | dbs->sector_num = sector_num; |
213 | 59a703eb | aliguori | dbs->sg_cur_index = 0;
|
214 | 59a703eb | aliguori | dbs->sg_cur_byte = 0;
|
215 | 43cf8ae6 | David Gibson | dbs->dir = dir; |
216 | cb144ccb | Christoph Hellwig | dbs->io_func = io_func; |
217 | 59a703eb | aliguori | dbs->bh = NULL;
|
218 | 59a703eb | aliguori | qemu_iovec_init(&dbs->iov, sg->nsg); |
219 | 59a703eb | aliguori | dma_bdrv_cb(dbs, 0);
|
220 | 37b7842c | aliguori | return &dbs->common;
|
221 | 59a703eb | aliguori | } |
222 | 59a703eb | aliguori | |
223 | 59a703eb | aliguori | |
/* Scatter/gather read: disk -> guest memory (DMA "from device"). */
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}
231 | 59a703eb | aliguori | |
/* Scatter/gather write: guest memory -> disk (DMA "to device"). */
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}
239 | 8171ee35 | Paolo Bonzini | |
240 | 8171ee35 | Paolo Bonzini | |
241 | c65bcef3 | David Gibson | static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
|
242 | c65bcef3 | David Gibson | DMADirection dir) |
243 | 8171ee35 | Paolo Bonzini | { |
244 | 8171ee35 | Paolo Bonzini | uint64_t resid; |
245 | 8171ee35 | Paolo Bonzini | int sg_cur_index;
|
246 | 8171ee35 | Paolo Bonzini | |
247 | 8171ee35 | Paolo Bonzini | resid = sg->size; |
248 | 8171ee35 | Paolo Bonzini | sg_cur_index = 0;
|
249 | 8171ee35 | Paolo Bonzini | len = MIN(len, resid); |
250 | 8171ee35 | Paolo Bonzini | while (len > 0) { |
251 | 8171ee35 | Paolo Bonzini | ScatterGatherEntry entry = sg->sg[sg_cur_index++]; |
252 | 8171ee35 | Paolo Bonzini | int32_t xfer = MIN(len, entry.len); |
253 | df32fd1c | Paolo Bonzini | dma_memory_rw(sg->as, entry.base, ptr, xfer, dir); |
254 | 8171ee35 | Paolo Bonzini | ptr += xfer; |
255 | 8171ee35 | Paolo Bonzini | len -= xfer; |
256 | 8171ee35 | Paolo Bonzini | resid -= xfer; |
257 | 8171ee35 | Paolo Bonzini | } |
258 | 8171ee35 | Paolo Bonzini | |
259 | 8171ee35 | Paolo Bonzini | return resid;
|
260 | 8171ee35 | Paolo Bonzini | } |
261 | 8171ee35 | Paolo Bonzini | |
/* Copy from @ptr into @sg (guest receives data); returns residual bytes. */
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}
266 | 8171ee35 | Paolo Bonzini | |
/* Copy from @sg into @ptr (guest supplies data); returns residual bytes. */
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}
271 | 84a69356 | Paolo Bonzini | |
/* Start block-accounting for a request covering the whole of @sg. */
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}