root / hw / xilinx_axidma.c @ 83f7d43a
History | View | Annotate | Download (13.1 kB)
1 |
/*
|
---|---|
2 |
* QEMU model of Xilinx AXI-DMA block.
|
3 |
*
|
4 |
* Copyright (c) 2011 Edgar E. Iglesias.
|
5 |
*
|
6 |
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
7 |
* of this software and associated documentation files (the "Software"), to deal
|
8 |
* in the Software without restriction, including without limitation the rights
|
9 |
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
10 |
* copies of the Software, and to permit persons to whom the Software is
|
11 |
* furnished to do so, subject to the following conditions:
|
12 |
*
|
13 |
* The above copyright notice and this permission notice shall be included in
|
14 |
* all copies or substantial portions of the Software.
|
15 |
*
|
16 |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
17 |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
18 |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
19 |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
20 |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
21 |
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
22 |
* THE SOFTWARE.
|
23 |
*/
|
24 |
|
25 |
#include "sysbus.h" |
26 |
#include "qemu-char.h" |
27 |
#include "qemu-timer.h" |
28 |
#include "ptimer.h" |
29 |
#include "qemu-log.h" |
30 |
#include "qdev-addr.h" |
31 |
|
32 |
#include "xilinx_axidma.h" |
33 |
|
34 |
/* Debug printout wrapper: expands to nothing by default; change to "x"
 * to enable the D(qemu_log(...)) traces below. */
#define D(x)
35 |
|
36 |
/* Register offsets, expressed as 32-bit word indices into a stream's
 * 0x30-byte register bank (one bank per direction). */
#define R_DMACR     (0x00 / 4)   /* DMA control */
#define R_DMASR     (0x04 / 4)   /* DMA status */
#define R_CURDESC   (0x08 / 4)   /* current descriptor pointer */
#define R_TAILDESC  (0x10 / 4)   /* tail descriptor pointer */
#define R_MAX       (0x30 / 4)   /* words per bank */
41 |
|
42 |
/* DMACR bit fields.  */
enum {
    DMACR_RUNSTOP = 1,        /* 1 = run, 0 = stop */
    DMACR_TAILPTR_MODE = 2,   /* forced on by axidma_write() */
    DMACR_RESET = 4           /* soft reset; self-clears on read */
};
47 |
|
48 |
/* DMASR bit fields.  */
enum {
    DMASR_HALTED = 1,
    DMASR_IDLE = 2,
    DMASR_IOC_IRQ = 1 << 12,   /* interrupt on completion */
    DMASR_DLY_IRQ = 1 << 13,   /* delay-timer interrupt */

    DMASR_IRQ_MASK = 7 << 12   /* all irq-status bits */
};
56 |
|
57 |
/* In-memory layout of a stream descriptor.  Stored little-endian in
 * guest RAM; stream_desc_load()/stream_desc_store() handle the swap. */
struct SDesc {
    uint64_t nxtdesc;         /* guest address of the next descriptor */
    uint64_t buffer_address;  /* guest address of the data buffer */
    uint64_t reserved;
    uint32_t control;         /* SOF/EOF flags + buffer length */
    uint32_t status;          /* length + SOF/EOF/COMPLETE, written back */
    uint32_t app[6];          /* application-specific words */
};
65 |
|
66 |
/* Descriptor control field bits. */
enum {
    SDESC_CTRL_EOF = (1 << 26),           /* end of frame */
    SDESC_CTRL_SOF = (1 << 27),           /* start of frame */

    SDESC_CTRL_LEN_MASK = (1 << 23) - 1   /* buffer length in bytes */
};
72 |
|
73 |
/* Descriptor status field bits. */
enum {
    SDESC_STATUS_EOF = (1 << 26),
    SDESC_STATUS_SOF_BIT = 27,
    SDESC_STATUS_SOF = (1 << SDESC_STATUS_SOF_BIT),
    SDESC_STATUS_COMPLETE = (1 << 31)     /* descriptor fully processed */
};
79 |
|
80 |
/* Per-direction channel state; streams[0] = mem2s, streams[1] = s2mem. */
struct AXIStream {
    QEMUBH *bh;                 /* bottom half driving the delay timer */
    ptimer_state *ptimer;       /* completion-delay timer */
    qemu_irq irq;

    int nr;                     /* channel index */

    struct SDesc desc;          /* host-order copy of the current descriptor */
    int pos;                    /* bytes accumulated into the mem2s txbuf */
    unsigned int complete_cnt;  /* completions left before the IOC irq */
    uint32_t regs[R_MAX];
};
92 |
|
93 |
/* Top-level device state. */
struct XilinxAXIDMA {
    SysBusDevice busdev;
    MemoryRegion iomem;
    uint32_t freqhz;    /* delay-timer frequency, from the "freqhz" property */
    void *dmach;        /* struct XilinxDMAConnection to the client device */

    struct AXIStream streams[2];
};
101 |
|
102 |
/*
 * Helper calls to extract info from descriptors and other trivial
 * state from regs.
 */
|
106 |
static inline int stream_desc_sof(struct SDesc *d) |
107 |
{ |
108 |
return d->control & SDESC_CTRL_SOF;
|
109 |
} |
110 |
|
111 |
static inline int stream_desc_eof(struct SDesc *d) |
112 |
{ |
113 |
return d->control & SDESC_CTRL_EOF;
|
114 |
} |
115 |
|
116 |
static inline int stream_resetting(struct AXIStream *s) |
117 |
{ |
118 |
return !!(s->regs[R_DMACR] & DMACR_RESET);
|
119 |
} |
120 |
|
121 |
static inline int stream_running(struct AXIStream *s) |
122 |
{ |
123 |
return s->regs[R_DMACR] & DMACR_RUNSTOP;
|
124 |
} |
125 |
|
126 |
static inline int stream_halted(struct AXIStream *s) |
127 |
{ |
128 |
return s->regs[R_DMASR] & DMASR_HALTED;
|
129 |
} |
130 |
|
131 |
static inline int stream_idle(struct AXIStream *s) |
132 |
{ |
133 |
return !!(s->regs[R_DMASR] & DMASR_IDLE);
|
134 |
} |
135 |
|
136 |
/* Put a channel back into its power-on register state. */
static void stream_reset(struct AXIStream *s)
{
    s->regs[R_DMASR] = DMASR_HALTED;  /* starts up halted.  */
    s->regs[R_DMACR] = 1 << 16; /* Starts with one in compl threshold.  */
}
141 |
|
142 |
/* Map an offset addr into a channel index. */
|
143 |
static inline int streamid_from_addr(target_phys_addr_t addr) |
144 |
{ |
145 |
int sid;
|
146 |
|
147 |
sid = addr / (0x30);
|
148 |
sid &= 1;
|
149 |
return sid;
|
150 |
} |
151 |
|
152 |
#ifdef DEBUG_ENET
/*
 * Dump the interesting fields of a descriptor to the qemu log.
 * Fix: the 64-bit fields were formatted as "buffer_addr = " PRIx64,
 * i.e. without the leading '%', so no conversion specifier existed and
 * the uint64_t arguments were mismatched against the format string
 * (undefined behavior, values never printed).
 */
static void stream_desc_show(struct SDesc *d)
{
    qemu_log("buffer_addr = %" PRIx64 "\n", d->buffer_address);
    qemu_log("nxtdesc = %" PRIx64 "\n", d->nxtdesc);
    qemu_log("control = %x\n", d->control);
    qemu_log("status = %x\n", d->status);
}
#endif
|
161 |
|
162 |
/* Fetch the descriptor at 'addr' into s->desc, converting every field
 * from the guest's little-endian layout to host byte order. */
static void stream_desc_load(struct AXIStream *s, target_phys_addr_t addr)
{
    struct SDesc *d = &s->desc;
    unsigned int w;

    cpu_physical_memory_read(addr, (void *) d, sizeof *d);

    /* Descriptors live in guest RAM in little-endian order.  */
    d->nxtdesc = le64_to_cpu(d->nxtdesc);
    d->buffer_address = le64_to_cpu(d->buffer_address);
    d->control = le32_to_cpu(d->control);
    d->status = le32_to_cpu(d->status);
    for (w = 0; w < ARRAY_SIZE(d->app); w++) {
        d->app[w] = le32_to_cpu(d->app[w]);
    }
}
178 |
|
179 |
/* Write s->desc back to guest memory at 'addr', converting every field
 * from host byte order to the guest's little-endian layout first. */
static void stream_desc_store(struct AXIStream *s, target_phys_addr_t addr)
{
    struct SDesc *d = &s->desc;
    unsigned int w;

    /* Guest RAM expects little-endian descriptors.  */
    for (w = 0; w < ARRAY_SIZE(d->app); w++) {
        d->app[w] = cpu_to_le32(d->app[w]);
    }
    d->nxtdesc = cpu_to_le64(d->nxtdesc);
    d->buffer_address = cpu_to_le64(d->buffer_address);
    d->control = cpu_to_le32(d->control);
    d->status = cpu_to_le32(d->status);
    cpu_physical_memory_write(addr, (void *) d, sizeof *d);
}
194 |
|
195 |
static void stream_update_irq(struct AXIStream *s) |
196 |
{ |
197 |
unsigned int pending, mask, irq; |
198 |
|
199 |
pending = s->regs[R_DMASR] & DMASR_IRQ_MASK; |
200 |
mask = s->regs[R_DMACR] & DMASR_IRQ_MASK; |
201 |
|
202 |
irq = pending & mask; |
203 |
|
204 |
qemu_set_irq(s->irq, !!irq); |
205 |
} |
206 |
|
207 |
static void stream_reload_complete_cnt(struct AXIStream *s) |
208 |
{ |
209 |
unsigned int comp_th; |
210 |
comp_th = (s->regs[R_DMACR] >> 16) & 0xff; |
211 |
s->complete_cnt = comp_th; |
212 |
} |
213 |
|
214 |
/* Completion-delay timer callback: raise the delay irq, re-arm the
 * completion counter and re-evaluate the irq line. */
static void timer_hit(void *opaque)
{
    struct AXIStream *s = opaque;

    stream_reload_complete_cnt(s);
    s->regs[R_DMASR] |= DMASR_DLY_IRQ;
    stream_update_irq(s);
}
222 |
|
223 |
/*
 * Account for one completed frame: (re)start the delay timer when a
 * completion delay is programmed in DMACR[31:24], and raise the IOC irq
 * once the completion-count threshold is reached.
 */
static void stream_complete(struct AXIStream *s)
{
    unsigned int comp_delay;

    /* Start the delayed timer.  */
    comp_delay = s->regs[R_DMACR] >> 24;
    if (comp_delay) {
        ptimer_stop(s->ptimer);
        ptimer_set_count(s->ptimer, comp_delay);
        ptimer_run(s->ptimer, 1);
    }

    /* NOTE(review): if the guest programs a completion threshold of 0,
     * complete_cnt is 0 here and this unsigned decrement wraps to
     * UINT_MAX, deferring the IOC irq almost forever — confirm whether
     * a zero threshold is reachable/intended. */
    s->complete_cnt--;
    if (s->complete_cnt == 0) {
        /* Raise the IOC irq.  */
        s->regs[R_DMASR] |= DMASR_IOC_IRQ;
        stream_reload_complete_cnt(s);
    }
}
242 |
|
243 |
/*
 * Walk the MM2S descriptor chain starting at CURDESC: gather each
 * frame's fragments into txbuf and hand every completed frame to the
 * client through 'dmach'.  Stops (and goes idle) when it meets a
 * descriptor that is already COMPLETE or after processing TAILDESC.
 */
static void stream_process_mem2s(struct AXIStream *s,
                                 struct XilinxDMAConnection *dmach)
{
    uint32_t prev_d;
    unsigned char txbuf[16 * 1024];
    unsigned int txlen;
    uint32_t app[6];

    if (!stream_running(s) || stream_idle(s)) {
        return;
    }

    while (1) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        /* An already-completed descriptor marks the end of fresh work. */
        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }

        if (stream_desc_sof(&s->desc)) {
            /* New frame: restart accumulation and latch the SOF
             * descriptor's app words to travel with the frame. */
            s->pos = 0;
            memcpy(app, s->desc.app, sizeof app);
        }

        txlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        /* The model gathers a whole frame into a fixed buffer; bail out
         * hard if the guest's frame exceeds it. */
        if ((txlen + s->pos) > sizeof txbuf) {
            hw_error("%s: too small internal txbuf! %d\n", __func__,
                     txlen + s->pos);
        }

        cpu_physical_memory_read(s->desc.buffer_address,
                                 txbuf + s->pos, txlen);
        s->pos += txlen;

        /* EOF: the frame is fully gathered — push it to the client. */
        if (stream_desc_eof(&s->desc)) {
            xlx_dma_push_to_client(dmach, txbuf, s->pos, app);
            s->pos = 0;
            stream_complete(s);
        }

        /* Update the descriptor.  */
        s->desc.status = txlen | SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);

        /* Advance.  Stop once the tail descriptor has been processed. */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }
}
297 |
|
298 |
/*
 * Scatter an incoming frame (buf/len) into the S2MM descriptor chain
 * starting at CURDESC.  The final descriptor of the frame receives the
 * app words and the EOF status bit; the first gets SOF.  Stops (and
 * goes idle) on an already-COMPLETE descriptor or after TAILDESC.
 */
static void stream_process_s2mem(struct AXIStream *s,
                                 unsigned char *buf, size_t len, uint32_t *app)
{
    uint32_t prev_d;
    unsigned int rxlen;
    int pos = 0;     /* bytes of buf consumed so far */
    int sof = 1;     /* first descriptor of the frame gets SOF */

    if (!stream_running(s) || stream_idle(s)) {
        return;
    }

    while (len) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }

        rxlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if (rxlen > len) {
            /* Remaining frame data fits within this descriptor's buffer. */
            rxlen = len;
        }

        cpu_physical_memory_write(s->desc.buffer_address, buf + pos, rxlen);
        len -= rxlen;
        pos += rxlen;

        /* Update the descriptor.  Last chunk: record completion, copy
         * the app words and flag end-of-frame. */
        if (!len) {
            int i;

            stream_complete(s);
            /* NOTE(review): only 5 of the 6 app[] words are copied —
             * confirm against the AXI DMA descriptor spec. */
            for (i = 0; i < 5; i++) {
                s->desc.app[i] = app[i];
            }
            s->desc.status |= SDESC_STATUS_EOF;
        }

        s->desc.status |= sof << SDESC_STATUS_SOF_BIT;
        s->desc.status |= SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);
        sof = 0;

        /* Advance.  Stop once the tail descriptor has been processed. */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }
}
353 |
|
354 |
static
|
355 |
void axidma_push(void *opaque, unsigned char *buf, size_t len, uint32_t *app) |
356 |
{ |
357 |
struct XilinxAXIDMA *d = opaque;
|
358 |
struct AXIStream *s = &d->streams[1]; |
359 |
|
360 |
if (!app) {
|
361 |
hw_error("No stream app data!\n");
|
362 |
} |
363 |
stream_process_s2mem(s, buf, len, app); |
364 |
stream_update_irq(s); |
365 |
} |
366 |
|
367 |
/*
 * MMIO read handler for both channel register banks.  Note the reads
 * have side effects: DMACR drops its RESET bit, and DMASR is refreshed
 * with the live completion and delay counters.
 */
static uint64_t axidma_read(void *opaque, target_phys_addr_t addr,
                            unsigned size)
{
    struct XilinxAXIDMA *d = opaque;
    struct AXIStream *s;
    uint32_t r = 0;
    int sid;

    /* Select the channel, then reduce addr to a word index in its bank. */
    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
    case R_DMACR:
        /* Simulate one cycles reset delay.  */
        s->regs[addr] &= ~DMACR_RESET;
        r = s->regs[addr];
        break;
    case R_DMASR:
        /* Rebuild the live fields: completion count in [23:16] and the
         * remaining delay-timer count in [31:24]. */
        s->regs[addr] &= 0xffff;
        s->regs[addr] |= (s->complete_cnt & 0xff) << 16;
        s->regs[addr] |= (ptimer_get_count(s->ptimer) & 0xff) << 24;
        r = s->regs[addr];
        break;
    default:
        r = s->regs[addr];
        D(qemu_log("%s ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                   __func__, sid, addr * 4, r));
        break;
    }
    return r;

}
401 |
|
402 |
/*
 * MMIO write handler for both channel register banks.  A write to
 * TAILDESC on stream 0 kicks off mem2s processing; DMACR writes can
 * reset or start the channel.  The irq line is re-evaluated after
 * every write.
 */
static void axidma_write(void *opaque, target_phys_addr_t addr,
                         uint64_t value, unsigned size)
{
    struct XilinxAXIDMA *d = opaque;
    struct AXIStream *s;
    int sid;

    /* Select the channel, then reduce addr to a word index in its bank. */
    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
    case R_DMACR:
        /* Tailptr mode is always on.  */
        value |= DMACR_TAILPTR_MODE;
        /* Remember our previous reset state.  */
        value |= (s->regs[addr] & DMACR_RESET);
        s->regs[addr] = value;

        if (value & DMACR_RESET) {
            stream_reset(s);
        }

        if ((value & 1) && !stream_resetting(s)) {
            /* Start processing.  */
            s->regs[R_DMASR] &= ~(DMASR_HALTED | DMASR_IDLE);
        }
        stream_reload_complete_cnt(s);
        break;

    case R_DMASR:
        /* Mask away write to clear irq lines.  */
        /* NOTE(review): this clears the irq bits of the *written value*
         * (equivalent to value & ~DMASR_IRQ_MASK) rather than doing a
         * write-1-to-clear on the stored DMASR bits — confirm against
         * the AXI DMA register specification. */
        value &= ~(value & DMASR_IRQ_MASK);
        s->regs[addr] = value;
        break;

    case R_TAILDESC:
        s->regs[addr] = value;
        s->regs[R_DMASR] &= ~DMASR_IDLE; /* Not idle.  */
        /* Writing TAILDESC starts processing; only the mem2s channel
         * (sid 0) is driven from here, s2mem is driven by axidma_push(). */
        if (!sid) {
            stream_process_mem2s(s, d->dmach);
        }
        break;
    default:
        /* NOTE(review): this debug format uses %x with a uint64_t
         * 'value' — harmless while D() compiles out, but mismatched if
         * enabled. */
        D(qemu_log("%s: ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                   __func__, sid, addr * 4, value));
        s->regs[addr] = value;
        break;
    }
    stream_update_irq(s);
}
454 |
|
455 |
/* MMIO dispatch table for the two register banks. */
static const MemoryRegionOps axidma_ops = {
    .read = axidma_read,
    .write = axidma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
460 |
|
461 |
/*
 * SysBus init: wire up the irqs, attach to the DMA client, map the
 * register banks and reset both channels.  Fails hard if the "dmach"
 * property was not connected.
 */
static int xilinx_axidma_init(SysBusDevice *dev)
{
    struct XilinxAXIDMA *s = FROM_SYSBUS(typeof(*s), dev);
    int i;

    /* irq order matters to the board wiring: s2mem first, then mem2s. */
    sysbus_init_irq(dev, &s->streams[1].irq);
    sysbus_init_irq(dev, &s->streams[0].irq);

    if (!s->dmach) {
        hw_error("Unconnected DMA channel.\n");
    }

    /* Register axidma_push() as the client's way of delivering frames. */
    xlx_dma_connect_dma(s->dmach, s, axidma_push);

    /* One R_MAX-sized bank per direction. */
    memory_region_init_io(&s->iomem, &axidma_ops, s,
                          "axidma", R_MAX * 4 * 2);
    sysbus_init_mmio(dev, &s->iomem);

    for (i = 0; i < 2; i++) {
        stream_reset(&s->streams[i]);
        s->streams[i].nr = i;
        s->streams[i].bh = qemu_bh_new(timer_hit, &s->streams[i]);
        s->streams[i].ptimer = ptimer_init(s->streams[i].bh);
        ptimer_set_freq(s->streams[i].ptimer, s->freqhz);
    }
    return 0;
}
488 |
|
489 |
/* qdev properties: delay-timer frequency plus the (mandatory) pointer
 * to the DMA connection object — see the check in xilinx_axidma_init(). */
static Property axidma_properties[] = {
    DEFINE_PROP_UINT32("freqhz", struct XilinxAXIDMA, freqhz, 50000000),
    DEFINE_PROP_PTR("dmach", struct XilinxAXIDMA, dmach),
    DEFINE_PROP_END_OF_LIST(),
};
494 |
|
495 |
/* QOM class init: install the SysBus init hook and the property list. */
static void axidma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);

    k->init = xilinx_axidma_init;
    dc->props = axidma_properties;
}
503 |
|
504 |
/* QOM type description for the "xilinx,axidma" device. */
static TypeInfo axidma_info = {
    .name          = "xilinx,axidma",
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(struct XilinxAXIDMA),
    .class_init    = axidma_class_init,
};
510 |
|
511 |
/* Register the device type with QOM at module-load time. */
static void xilinx_axidma_register_types(void)
{
    type_register_static(&axidma_info);
}

type_init(xilinx_axidma_register_types)