dma-helpers.c @ c65bcef3
/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "trace.h"

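/* Fill @len bytes of guest memory at @addr with the constant byte @c,
 * using a small on-stack bounce buffer so arbitrarily large regions can
 * be filled without a large allocation.  Note that the @dma context is
 * not consulted here; the fill goes straight through
 * cpu_physical_memory_rw(). */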
int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        cpu_physical_memory_rw(addr, fillbuf, l, true);
        len -= l;
        addr += l;
    }
    return 0;
}

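/* Initialize @qsg with room for @alloc_hint entries; the backing array
 * grows on demand in qemu_sglist_add(). */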
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->dma = dma;
}

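/* Append the region [base, base + len) to the list, doubling the
 * backing array (2n + 1, so a zero hint still grows) when it is
 * full. */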
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    g_free(qsg->sg);
}

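/* State for one scatter/gather block-device request.  The request walks
 * the QEMUSGList one mapped chunk at a time; sg_cur_index and
 * sg_cur_byte record the resume position between partial transfers. */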
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    bool in_cancel;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

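/* When dma_memory_map() cannot map any part of the current entry (for
 * example because the bounce buffer is busy), the request parks itself
 * with cpu_register_map_client() and resumes from a bottom half once a
 * mapping becomes available again. */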
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

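/* Unmap every buffer mapped for the current transfer and reset the
 * iovec for the next pass.  The full mapped length is passed as the
 * access length, since the transfer either completed or is being torn
 * down. */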
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->dma, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

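/* Tear down the request: unmap buffers, invoke the caller's completion
 * callback, and release the AIOCB unless dma_aio_cancel() still holds
 * a reference to it. */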
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel. */
        qemu_aio_release(dbs);
    }
}

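/* Driver callback and main loop of the request.  Each invocation unmaps
 * the chunk just transferred, maps as many further scatter/gather
 * entries as possible into dbs->iov, and submits the next I/O.  A
 * partial mapping is fine: the unmapped remainder is picked up on the
 * next pass.  512-byte sectors are assumed throughout. */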
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->dma, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}

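/* Cancel the in-flight driver request, if any.  in_cancel keeps
 * dma_complete() from releasing the AIOCB while bdrv_aio_cancel() may
 * still complete the request synchronously. */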
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}

static AIOPool dma_aio_pool = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};

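/* Start a scatter/gather request on @bs.  @io_func is the driver entry
 * point (bdrv_aio_readv or bdrv_aio_writev via the wrappers below); the
 * first chunk is submitted before this function returns. */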
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}

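/* Convenience wrappers binding the transfer direction to the matching
 * block-layer function. */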
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}

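/* Synchronously copy up to @len bytes between a linear buffer and a
 * scatter/gather list, returning the number of bytes of @sg that were
 * not transferred. */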
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->dma, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}

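/* Account a transfer of sg->size bytes against @bs. */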
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}
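
/* A minimal usage sketch (illustrative only, not part of this file):
 * a hypothetical device with guest-physical segments seg[0..n) could
 * issue a scatter/gather read roughly like this, assuming dma_context,
 * the segment array, and the completion callback dev_read_cb are
 * provided by the device model:
 *
 *     QEMUSGList qsg;
 *     qemu_sglist_init(&qsg, n, dma_context);
 *     for (i = 0; i < n; i++) {
 *         qemu_sglist_add(&qsg, seg[i].addr, seg[i].len);
 *     }
 *     dma_acct_start(bs, &cookie, &qsg, BDRV_ACCT_READ);
 *     dma_bdrv_read(bs, &qsg, sector, dev_read_cb, dev);
 *
 * The list itself is freed with qemu_sglist_destroy() once the request
 * has completed. */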