root / hw / pxa2xx_dma.c @ 0d09e41a
History | View | Annotate | Download (16.1 kB)
1 |
/*
|
---|---|
2 |
* Intel XScale PXA255/270 DMA controller.
|
3 |
*
|
4 |
* Copyright (c) 2006 Openedhand Ltd.
|
5 |
* Copyright (c) 2006 Thorsten Zitterell
|
6 |
* Written by Andrzej Zaborowski <balrog@zabor.org>
|
7 |
*
|
8 |
* This code is licensed under the GPL.
|
9 |
*/
|
10 |
|
11 |
#include "hw/hw.h" |
12 |
#include "hw/arm/pxa.h" |
13 |
#include "hw/sysbus.h" |
14 |
|
15 |
/* Channel counts differ between SoC generations.  */
#define PXA255_DMA_NUM_CHANNELS 16
#define PXA27X_DMA_NUM_CHANNELS 32

/* Number of peripheral DMA request (DREQ) lines routed into the mapper.  */
#define PXA2XX_DMA_NUM_REQUESTS 75

/* State of one DMA channel: the four descriptor-loaded registers plus
 * the DCSR status word and the current level of the mapped request line.  */
typedef struct {
    uint32_t descr;     /* DDADR: next descriptor address */
    uint32_t src;       /* DSADR: source address */
    uint32_t dest;      /* DTADR: target address */
    uint32_t cmd;       /* DCMD: command / length word */
    uint32_t state;     /* DCSR: control and status bits */
    int request;        /* level of the mapped DREQ line (0 or 1) */
} PXA2xxDMAChannel;

typedef struct PXA2xxDMAState {
    SysBusDevice busdev;
    MemoryRegion iomem;
    qemu_irq irq;

    /* Per-interrupt-source bitmaps, one bit per channel; their union
     * drives the combined interrupt line (see pxa2xx_dma_update).  */
    uint32_t stopintr;
    uint32_t eorintr;
    uint32_t rasintr;
    uint32_t startintr;
    uint32_t endintr;

    uint32_t align;     /* DALGN register */
    uint32_t pio;       /* DPCSR register */

    int channels;       /* set from the "channels" property at init */
    PXA2xxDMAChannel *chan;

    /* DRCMR request-to-channel mappings, indexed by request number.  */
    uint8_t req[PXA2XX_DMA_NUM_REQUESTS];

    /* Flag to avoid recursive DMA invocations. */
    int running;
} PXA2xxDMAState;

#define DCSR0   0x0000  /* DMA Control / Status register for Channel 0 */
#define DCSR31  0x007c  /* DMA Control / Status register for Channel 31 */
#define DALGN   0x00a0  /* DMA Alignment register */
#define DPCSR   0x00a4  /* DMA Programmed I/O Control Status register */
#define DRQSR0  0x00e0  /* DMA DREQ<0> Status register */
#define DRQSR1  0x00e4  /* DMA DREQ<1> Status register */
#define DRQSR2  0x00e8  /* DMA DREQ<2> Status register */
#define DINT    0x00f0  /* DMA Interrupt register */
#define DRCMR0  0x0100  /* Request to Channel Map register 0 */
#define DRCMR63 0x01fc  /* Request to Channel Map register 63 */
#define D_CH0   0x0200  /* Channel 0 Descriptor start */
#define DRCMR64 0x1100  /* Request to Channel Map register 64 */
#define DRCMR74 0x1128  /* Request to Channel Map register 74 */

/* Per-channel register (word index within a channel's descriptor slot) */
#define DDADR   0x00
#define DSADR   0x01
#define DTADR   0x02
#define DCMD    0x03

/* Bit-field masks */
#define DRCMR_CHLNUM            0x1f        /* mapped channel number */
#define DRCMR_MAPVLD            (1 << 7)    /* mapping valid */
#define DDADR_STOP              (1 << 0)
#define DDADR_BREN              (1 << 1)    /* descriptor branch enable */
#define DCMD_LEN                0x1fff      /* transfer length in bytes */
#define DCMD_WIDTH(x)           (1 << ((((x) >> 14) & 3) - 1))
#define DCMD_SIZE(x)            (4 << (((x) >> 16) & 3))
#define DCMD_FLYBYT             (1 << 19)
#define DCMD_FLYBYS             (1 << 20)
#define DCMD_ENDIRQEN           (1 << 21)
#define DCMD_STARTIRQEN         (1 << 22)
#define DCMD_CMPEN              (1 << 25)
#define DCMD_FLOWTRG            (1 << 28)
#define DCMD_FLOWSRC            (1 << 29)
#define DCMD_INCTRGADDR         (1 << 30)
#define DCMD_INCSRCADDR         (1 << 31)
#define DCSR_BUSERRINTR         (1 << 0)
#define DCSR_STARTINTR          (1 << 1)
#define DCSR_ENDINTR            (1 << 2)
#define DCSR_STOPINTR           (1 << 3)
#define DCSR_RASINTR            (1 << 4)
#define DCSR_REQPEND            (1 << 8)
#define DCSR_EORINT             (1 << 9)
#define DCSR_CMPST              (1 << 10)
#define DCSR_MASKRUN            (1 << 22)
#define DCSR_RASIRQEN           (1 << 23)
#define DCSR_CLRCMPST           (1 << 24)
#define DCSR_SETCMPST           (1 << 25)
#define DCSR_EORSTOPEN          (1 << 26)
#define DCSR_EORJMPEN           (1 << 27)
#define DCSR_EORIRQEN           (1 << 28)
#define DCSR_STOPIRQEN          (1 << 29)
#define DCSR_NODESCFETCH        (1 << 30)
#define DCSR_RUN                (1 << 31)
|
108 |
/* Recompute the per-source interrupt bitmaps for channel @ch (when
 * non-negative) from its DCSR state, then drive the combined interrupt
 * line: raised while any channel has any interrupt source pending.  */
static inline void pxa2xx_dma_update(PXA2xxDMAState *s, int ch)
{
    if (ch >= 0) {
        uint32_t bit = 1 << ch;
        uint32_t state = s->chan[ch].state;

        /* Stop, EOR and RAS interrupts are reported only when the
         * corresponding per-channel enable bit is set.  */
        if ((state & DCSR_STOPIRQEN) && (state & DCSR_STOPINTR)) {
            s->stopintr |= bit;
        } else {
            s->stopintr &= ~bit;
        }

        if ((state & DCSR_EORIRQEN) && (state & DCSR_EORINT)) {
            s->eorintr |= bit;
        } else {
            s->eorintr &= ~bit;
        }

        if ((state & DCSR_RASIRQEN) && (state & DCSR_RASINTR)) {
            s->rasintr |= bit;
        } else {
            s->rasintr &= ~bit;
        }

        /* Start and end interrupts have no separate enable bit here.  */
        if (state & DCSR_STARTINTR) {
            s->startintr |= bit;
        } else {
            s->startintr &= ~bit;
        }

        if (state & DCSR_ENDINTR) {
            s->endintr |= bit;
        } else {
            s->endintr &= ~bit;
        }
    }

    if (s->stopintr | s->eorintr | s->rasintr | s->startintr | s->endintr) {
        qemu_irq_raise(s->irq);
    } else {
        qemu_irq_lower(s->irq);
    }
}
145 |
|
146 |
static inline void pxa2xx_dma_descriptor_fetch( |
147 |
PXA2xxDMAState *s, int ch)
|
148 |
{ |
149 |
uint32_t desc[4];
|
150 |
hwaddr daddr = s->chan[ch].descr & ~0xf;
|
151 |
if ((s->chan[ch].descr & DDADR_BREN) && (s->chan[ch].state & DCSR_CMPST))
|
152 |
daddr += 32;
|
153 |
|
154 |
cpu_physical_memory_read(daddr, (uint8_t *) desc, 16);
|
155 |
s->chan[ch].descr = desc[DDADR]; |
156 |
s->chan[ch].src = desc[DSADR]; |
157 |
s->chan[ch].dest = desc[DTADR]; |
158 |
s->chan[ch].cmd = desc[DCMD]; |
159 |
|
160 |
if (s->chan[ch].cmd & DCMD_FLOWSRC)
|
161 |
s->chan[ch].src &= ~3;
|
162 |
if (s->chan[ch].cmd & DCMD_FLOWTRG)
|
163 |
s->chan[ch].dest &= ~3;
|
164 |
|
165 |
if (s->chan[ch].cmd & (DCMD_CMPEN | DCMD_FLYBYS | DCMD_FLYBYT))
|
166 |
printf("%s: unsupported mode in channel %i\n", __FUNCTION__, ch);
|
167 |
|
168 |
if (s->chan[ch].cmd & DCMD_STARTIRQEN)
|
169 |
s->chan[ch].state |= DCSR_STARTINTR; |
170 |
} |
171 |
|
172 |
/*
 * Execute all runnable channels synchronously until none can make
 * further progress.  Data is staged through a small bounce buffer one
 * burst (DCMD_SIZE) at a time.  The s->running counter doubles as a
 * recursion guard: a nested invocation (e.g. via a request callback
 * triggered from within a transfer) only bumps the counter and returns,
 * leaving the outermost caller to loop again and pick up the new work.
 */
static void pxa2xx_dma_run(PXA2xxDMAState *s)
{
    int c, srcinc, destinc;
    uint32_t n, size;
    uint32_t width;
    uint32_t length;
    uint8_t buffer[32];  /* burst bounce buffer; DCMD_SIZE() is at most 32 */
    PXA2xxDMAChannel *ch;

    if (s->running ++)
        return;

    while (s->running) {
        s->running = 1;
        for (c = 0; c < s->channels; c ++) {
            ch = &s->chan[c];

            while ((ch->state & DCSR_RUN) && !(ch->state & DCSR_STOPINTR)) {
                /* Test for pending requests */
                if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && !ch->request)
                    break;

                length = ch->cmd & DCMD_LEN;
                size = DCMD_SIZE(ch->cmd);
                width = DCMD_WIDTH(ch->cmd);

                /* Source/target addresses advance only when the
                 * corresponding increment bit is set.  */
                srcinc = (ch->cmd & DCMD_INCSRCADDR) ? width : 0;
                destinc = (ch->cmd & DCMD_INCTRGADDR) ? width : 0;

                while (length) {
                    size = MIN(length, size);

                    /* Read one burst into the bounce buffer...  */
                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_read(ch->src, buffer + n, width);
                        ch->src += srcinc;
                    }

                    /* ...then write it out to the target.  */
                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_write(ch->dest, buffer + n, width);
                        ch->dest += destinc;
                    }

                    length -= size;

                    /* Flow-controlled transfer whose request line dropped
                     * mid-transfer: signal end-of-receive and optionally
                     * stop or jump to the next descriptor.  */
                    if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) &&
                            !ch->request) {
                        ch->state |= DCSR_EORINT;
                        if (ch->state & DCSR_EORSTOPEN)
                            ch->state |= DCSR_STOPINTR;
                        if ((ch->state & DCSR_EORJMPEN) &&
                                !(ch->state & DCSR_NODESCFETCH))
                            pxa2xx_dma_descriptor_fetch(s, c);
                        break;
                    }
                }

                /* Write the remaining length back into DCMD.  */
                ch->cmd = (ch->cmd & ~DCMD_LEN) | length;

                /* Is the transfer complete now? */
                if (!length) {
                    if (ch->cmd & DCMD_ENDIRQEN)
                        ch->state |= DCSR_ENDINTR;

                    if ((ch->state & DCSR_NODESCFETCH) ||
                        (ch->descr & DDADR_STOP) ||
                        (ch->state & DCSR_EORSTOPEN)) {
                        ch->state |= DCSR_STOPINTR;
                        ch->state &= ~DCSR_RUN;

                        break;
                    }

                    ch->state |= DCSR_STOPINTR;
                    break;
                }
            }
        }

        s->running --;
    }
}
253 |
|
254 |
/*
 * MMIO read handler.  Decodes @offset into the global registers (DINT,
 * DALGN, DPCSR), the request-map registers (DRCMR0..74, split across two
 * address ranges), the per-channel status registers (DCSR0..31) and the
 * per-channel descriptor window starting at D_CH0.  Only 32-bit accesses
 * are valid.  hw_error() does not return, so the numeric returns after
 * it are never reached in practice.
 */
static uint64_t pxa2xx_dma_read(void *opaque, hwaddr offset,
                                unsigned size)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    unsigned int channel;

    if (size != 4) {
        hw_error("%s: Bad access width\n", __FUNCTION__);
        return 5;
    }

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        /* Fold the upper DRCMR bank onto the lower one so a single
         * index computation works for both ranges.  */
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;
        return s->req[channel];

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        return 0;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        /* Reflect a pending request line in the read-only REQPEND bit.  */
        if (s->chan[channel].request)
            return s->chan[channel].state | DCSR_REQPEND;
        return s->chan[channel].state;

    case DINT:
        /* Combined per-channel interrupt bitmap.  */
        return s->stopintr | s->eorintr | s->rasintr |
                s->startintr | s->endintr;

    case DALGN:
        return s->align;

    case DPCSR:
        return s->pio;
    }

    /* Per-channel descriptor registers: 16 bytes (4 words) per channel.  */
    if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
        channel = (offset - D_CH0) >> 4;
        switch ((offset & 0x0f) >> 2) {
        case DDADR:
            return s->chan[channel].descr;
        case DSADR:
            return s->chan[channel].src;
        case DTADR:
            return s->chan[channel].dest;
        case DCMD:
            return s->chan[channel].cmd;
        }
    }

    hw_error("%s: Bad offset 0x" TARGET_FMT_plx "\n", __FUNCTION__, offset);
    return 7;
}
312 |
|
313 |
static void pxa2xx_dma_write(void *opaque, hwaddr offset, |
314 |
uint64_t value, unsigned size)
|
315 |
{ |
316 |
PXA2xxDMAState *s = (PXA2xxDMAState *) opaque; |
317 |
unsigned int channel; |
318 |
|
319 |
if (size != 4) { |
320 |
hw_error("%s: Bad access width\n", __FUNCTION__);
|
321 |
return;
|
322 |
} |
323 |
|
324 |
switch (offset) {
|
325 |
case DRCMR64 ... DRCMR74:
|
326 |
offset -= DRCMR64 - DRCMR0 - (64 << 2); |
327 |
/* Fall through */
|
328 |
case DRCMR0 ... DRCMR63:
|
329 |
channel = (offset - DRCMR0) >> 2;
|
330 |
|
331 |
if (value & DRCMR_MAPVLD)
|
332 |
if ((value & DRCMR_CHLNUM) > s->channels)
|
333 |
hw_error("%s: Bad DMA channel %i\n",
|
334 |
__FUNCTION__, (unsigned)value & DRCMR_CHLNUM);
|
335 |
|
336 |
s->req[channel] = value; |
337 |
break;
|
338 |
|
339 |
case DRQSR0:
|
340 |
case DRQSR1:
|
341 |
case DRQSR2:
|
342 |
/* Nothing to do */
|
343 |
break;
|
344 |
|
345 |
case DCSR0 ... DCSR31:
|
346 |
channel = offset >> 2;
|
347 |
s->chan[channel].state &= 0x0000071f & ~(value &
|
348 |
(DCSR_EORINT | DCSR_ENDINTR | |
349 |
DCSR_STARTINTR | DCSR_BUSERRINTR)); |
350 |
s->chan[channel].state |= value & 0xfc800000;
|
351 |
|
352 |
if (s->chan[channel].state & DCSR_STOPIRQEN)
|
353 |
s->chan[channel].state &= ~DCSR_STOPINTR; |
354 |
|
355 |
if (value & DCSR_NODESCFETCH) {
|
356 |
/* No-descriptor-fetch mode */
|
357 |
if (value & DCSR_RUN) {
|
358 |
s->chan[channel].state &= ~DCSR_STOPINTR; |
359 |
pxa2xx_dma_run(s); |
360 |
} |
361 |
} else {
|
362 |
/* Descriptor-fetch mode */
|
363 |
if (value & DCSR_RUN) {
|
364 |
s->chan[channel].state &= ~DCSR_STOPINTR; |
365 |
pxa2xx_dma_descriptor_fetch(s, channel); |
366 |
pxa2xx_dma_run(s); |
367 |
} |
368 |
} |
369 |
|
370 |
/* Shouldn't matter as our DMA is synchronous. */
|
371 |
if (!(value & (DCSR_RUN | DCSR_MASKRUN)))
|
372 |
s->chan[channel].state |= DCSR_STOPINTR; |
373 |
|
374 |
if (value & DCSR_CLRCMPST)
|
375 |
s->chan[channel].state &= ~DCSR_CMPST; |
376 |
if (value & DCSR_SETCMPST)
|
377 |
s->chan[channel].state |= DCSR_CMPST; |
378 |
|
379 |
pxa2xx_dma_update(s, channel); |
380 |
break;
|
381 |
|
382 |
case DALGN:
|
383 |
s->align = value; |
384 |
break;
|
385 |
|
386 |
case DPCSR:
|
387 |
s->pio = value & 0x80000001;
|
388 |
break;
|
389 |
|
390 |
default:
|
391 |
if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) { |
392 |
channel = (offset - D_CH0) >> 4;
|
393 |
switch ((offset & 0x0f) >> 2) { |
394 |
case DDADR:
|
395 |
s->chan[channel].descr = value; |
396 |
break;
|
397 |
case DSADR:
|
398 |
s->chan[channel].src = value; |
399 |
break;
|
400 |
case DTADR:
|
401 |
s->chan[channel].dest = value; |
402 |
break;
|
403 |
case DCMD:
|
404 |
s->chan[channel].cmd = value; |
405 |
break;
|
406 |
default:
|
407 |
goto fail;
|
408 |
} |
409 |
|
410 |
break;
|
411 |
} |
412 |
fail:
|
413 |
hw_error("%s: Bad offset " TARGET_FMT_plx "\n", __FUNCTION__, offset); |
414 |
} |
415 |
} |
416 |
|
417 |
/* MMIO access handlers for the controller's register window.  */
static const MemoryRegionOps pxa2xx_dma_ops = {
    .read = pxa2xx_dma_read,
    .write = pxa2xx_dma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
422 |
|
423 |
static void pxa2xx_dma_request(void *opaque, int req_num, int on) |
424 |
{ |
425 |
PXA2xxDMAState *s = opaque; |
426 |
int ch;
|
427 |
if (req_num < 0 || req_num >= PXA2XX_DMA_NUM_REQUESTS) |
428 |
hw_error("%s: Bad DMA request %i\n", __FUNCTION__, req_num);
|
429 |
|
430 |
if (!(s->req[req_num] & DRCMR_MAPVLD))
|
431 |
return;
|
432 |
ch = s->req[req_num] & DRCMR_CHLNUM; |
433 |
|
434 |
if (!s->chan[ch].request && on)
|
435 |
s->chan[ch].state |= DCSR_RASINTR; |
436 |
else
|
437 |
s->chan[ch].state &= ~DCSR_RASINTR; |
438 |
if (s->chan[ch].request && !on)
|
439 |
s->chan[ch].state |= DCSR_EORINT; |
440 |
|
441 |
s->chan[ch].request = on; |
442 |
if (on) {
|
443 |
pxa2xx_dma_run(s); |
444 |
pxa2xx_dma_update(s, ch); |
445 |
} |
446 |
} |
447 |
|
448 |
/*
 * SysBus realize hook.  Validates the "channels" property, allocates the
 * channel array, registers the DREQ GPIO inputs and maps the 64 KiB MMIO
 * register window plus the interrupt line.  Returns 0 on success, -1 if
 * the channel count was never set.
 */
static int pxa2xx_dma_init(SysBusDevice *dev)
{
    int i;
    PXA2xxDMAState *s;
    s = FROM_SYSBUS(PXA2xxDMAState, dev);

    /* "channels" defaults to -1; the board must set it explicitly.  */
    if (s->channels <= 0) {
        return -1;
    }

    /* g_new0() zero-initializes the array; the previous explicit
     * memset() right after g_malloc0() was redundant.  */
    s->chan = g_new0(PXA2xxDMAChannel, s->channels);
    for (i = 0; i < s->channels; i ++) {
        s->chan[i].state = DCSR_STOPINTR;
    }

    memset(s->req, 0, sizeof(uint8_t) * PXA2XX_DMA_NUM_REQUESTS);

    qdev_init_gpio_in(&dev->qdev, pxa2xx_dma_request, PXA2XX_DMA_NUM_REQUESTS);

    memory_region_init_io(&s->iomem, &pxa2xx_dma_ops, s,
                          "pxa2xx.dma", 0x00010000);
    sysbus_init_mmio(dev, &s->iomem);
    sysbus_init_irq(dev, &s->irq);

    return 0;
}
475 |
|
476 |
/* Create and wire up a PXA27x DMA controller (32 channels) at @base,
 * connected to interrupt line @irq.  */
DeviceState *pxa27x_dma_init(hwaddr base, qemu_irq irq)
{
    DeviceState *dev = qdev_create(NULL, "pxa2xx-dma");

    qdev_prop_set_int32(dev, "channels", PXA27X_DMA_NUM_CHANNELS);
    qdev_init_nofail(dev);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);

    return dev;
}
489 |
|
490 |
/* Create and wire up a PXA255 DMA controller (16 channels) at @base,
 * connected to interrupt line @irq.  */
DeviceState *pxa255_dma_init(hwaddr base, qemu_irq irq)
{
    DeviceState *dev;

    dev = qdev_create(NULL, "pxa2xx-dma");
    /* BUG FIX: this function previously requested
     * PXA27X_DMA_NUM_CHANNELS (32), but the PXA255 controller only
     * has PXA255_DMA_NUM_CHANNELS (16) channels.  */
    qdev_prop_set_int32(dev, "channels", PXA255_DMA_NUM_CHANNELS);
    qdev_init_nofail(dev);

    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);

    return dev;
}
503 |
|
504 |
/* VMState field-exists predicate: true only for the legacy version-0
 * migration stream layout (selects the VMSTATE_UNUSED_TEST padding).  */
static bool is_version_0(void *opaque, int version_id)
{
    return !version_id;
}
508 |
|
509 |
/* Migration state for a single DMA channel.  */
static VMStateDescription vmstate_pxa2xx_dma_chan = {
    .name = "pxa2xx_dma_chan",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(descr, PXA2xxDMAChannel),
        VMSTATE_UINT32(src, PXA2xxDMAChannel),
        VMSTATE_UINT32(dest, PXA2xxDMAChannel),
        VMSTATE_UINT32(cmd, PXA2xxDMAChannel),
        VMSTATE_UINT32(state, PXA2xxDMAChannel),
        VMSTATE_INT32(request, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};
524 |
|
525 |
/* Migration state for the whole controller.  Version 0 streams carried
 * 4 extra bytes that are skipped via the VMSTATE_UNUSED_TEST entry.  */
static VMStateDescription vmstate_pxa2xx_dma = {
    .name = "pxa2xx_dma",
    .version_id = 1,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED_TEST(is_version_0, 4),
        VMSTATE_UINT32(stopintr, PXA2xxDMAState),
        VMSTATE_UINT32(eorintr, PXA2xxDMAState),
        VMSTATE_UINT32(rasintr, PXA2xxDMAState),
        VMSTATE_UINT32(startintr, PXA2xxDMAState),
        VMSTATE_UINT32(endintr, PXA2xxDMAState),
        VMSTATE_UINT32(align, PXA2xxDMAState),
        VMSTATE_UINT32(pio, PXA2xxDMAState),
        VMSTATE_BUFFER(req, PXA2xxDMAState),
        /* The channel array length is the runtime "channels" value.  */
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(chan, PXA2xxDMAState, channels,
                vmstate_pxa2xx_dma_chan, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};
545 |
|
546 |
/* "channels" must be set by the board code before realize; the -1
 * default makes pxa2xx_dma_init() fail if it was forgotten.  */
static Property pxa2xx_dma_properties[] = {
    DEFINE_PROP_INT32("channels", PXA2xxDMAState, channels, -1),
    DEFINE_PROP_END_OF_LIST(),
};
550 |
|
551 |
/* QOM class initializer: hook up the SysBus init callback, description,
 * migration state and properties.  */
static void pxa2xx_dma_class_init(ObjectClass *klass, void *data)
{
    SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    sbc->init = pxa2xx_dma_init;
    dc->desc = "PXA2xx DMA controller";
    dc->vmsd = &vmstate_pxa2xx_dma;
    dc->props = pxa2xx_dma_properties;
}
561 |
|
562 |
/* QOM type registration record for the "pxa2xx-dma" device.  */
static const TypeInfo pxa2xx_dma_info = {
    .name = "pxa2xx-dma",
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PXA2xxDMAState),
    .class_init = pxa2xx_dma_class_init,
};
568 |
|
569 |
static void pxa2xx_dma_register_types(void) |
570 |
{ |
571 |
type_register_static(&pxa2xx_dma_info); |
572 |
} |
573 |
|
574 |
type_init(pxa2xx_dma_register_types) |