dma-helpers.c @ f53ec699
/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "sysemu/dma.h"
#include "trace.h"
#include "qemu/range.h"
#include "qemu/thread.h"

/* #define DEBUG_IOMMU */

int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(as, DMA_DIRECTION_FROM_DEVICE);

#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;
    bool error = false;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        error |= address_space_rw(as, addr, fillbuf, l, true);
        len -= l;
        addr += l;
    }

    return error;
}

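/*
 * Illustrative usage sketch (not part of the original file): a device
 * model zero-filling a guest buffer, e.g. clearing a completion ring the
 * guest has programmed.  "as", "ring_base" and "ring_len" stand in for
 * the device's DMA address space and guest-supplied values.
 *
 *     if (dma_memory_set(as, ring_base, 0, ring_len)) {
 *         // nonzero means at least one underlying access failed;
 *         // report a DMA error to the guest
 *     }
 */
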
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->as = as;
    qsg->dev = dev;
    object_ref(OBJECT(dev));
}

void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    object_unref(OBJECT(qsg->dev));
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}

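/*
 * Illustrative lifecycle sketch (not part of the original file): building
 * a two-segment list and releasing it.  "mydev" and the seg0_/seg1_
 * values are hypothetical.  qemu_sglist_add() grows the array
 * geometrically (2 * nalloc + 1), so alloc_hint only needs to be an
 * estimate.
 *
 *     QEMUSGList qsg;
 *
 *     qemu_sglist_init(&qsg, DEVICE(mydev), 2, &address_space_memory);
 *     qemu_sglist_add(&qsg, seg0_base, seg0_len);
 *     qemu_sglist_add(&qsg, seg1_base, seg1_len);
 *     // ... pass &qsg to dma_bdrv_io() or dma_buf_rw() below ...
 *     qemu_sglist_destroy(&qsg);   // also drops the reference on mydev
 */
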
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    bool in_cancel;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

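/*
 * A DMAAIOCB tracks one in-flight scatter-gather block transfer.
 * dma_bdrv_cb() below acts as a state machine: each invocation maps as
 * many remaining ScatterGatherEntry ranges as it can into "iov", issues
 * a single io_func call over them, and runs again as that call's
 * completion callback; sg_cur_index and sg_cur_byte record where to
 * resume.
 */
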
static void dma_bdrv_cb(void *opaque, int ret);

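/*
 * If dma_memory_map() cannot map anything (typically because the bounce
 * buffer used for MMIO regions is already in use), the transfer parks
 * itself with cpu_register_map_client() and the two helpers below restart
 * the state machine from a bottom half once a mapping becomes available.
 */
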
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->as, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel.  */
        qemu_aio_release(dbs);
    }
}

static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir);
        if (!mem)
            break;
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}

static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}

static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size = sizeof(DMAAIOCB),
    .cancel = dma_aio_cancel,
};

BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}

BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}

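/*
 * Illustrative usage sketch (not part of the original file): an emulated
 * storage controller draining its scatter-gather list from the backing
 * BlockDriverState.  MyDevState, s->qsg, s->acct, "lba" and
 * mydev_dma_done() are hypothetical device-model names; dma_acct_start()
 * is defined at the end of this file.
 *
 *     static void mydev_dma_done(void *opaque, int ret)
 *     {
 *         MyDevState *s = opaque;
 *         // ret < 0 reports an I/O error; complete the request and
 *         // raise the device's interrupt here
 *     }
 *
 *     dma_acct_start(s->bs, &s->acct, &s->qsg, BDRV_ACCT_READ);
 *     s->acb = dma_bdrv_read(s->bs, &s->qsg, lba, mydev_dma_done, s);
 */
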
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->as, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}

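/*
 * Illustrative usage sketch (not part of the original file): a synchronous
 * copy of a small control structure out of guest memory, as a device might
 * do for a request header.  "s->qsg" is a hypothetical, already populated
 * QEMUSGList.  The return value is the residual: sg->size minus the number
 * of bytes actually copied.
 *
 *     uint8_t hdr[64];
 *     uint64_t resid = dma_buf_read(hdr, sizeof(hdr), &s->qsg);
 *     if (resid) {
 *         // the list described more data than fit in hdr[]
 *     }
 */
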
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}