hw/pxa2xx_dma.c @ e1dad5a6
/*
 * Intel XScale PXA255/270 DMA controller.
 *
 * Copyright (c) 2006 Openedhand Ltd.
 * Copyright (c) 2006 Thorsten Zitterell
 * Written by Andrzej Zaborowski <balrog@zabor.org>
 *
 * This code is licensed under the GPL.
 */

#include "hw.h"
#include "pxa.h"

struct pxa2xx_dma_channel_s {
    target_phys_addr_t descr;
    target_phys_addr_t src;
    target_phys_addr_t dest;
    uint32_t cmd;
    uint32_t state;
    int request;
};

/* Allow the DMA to be used as a PIC. */
typedef void (*pxa2xx_dma_handler_t)(void *opaque, int irq, int level);

struct pxa2xx_dma_state_s {
    pxa2xx_dma_handler_t handler;
    target_phys_addr_t base;
    qemu_irq irq;

    uint32_t stopintr;
    uint32_t eorintr;
    uint32_t rasintr;
    uint32_t startintr;
    uint32_t endintr;

    uint32_t align;
    uint32_t pio;

    int channels;
    struct pxa2xx_dma_channel_s *chan;

    uint8_t *req;

    /* Flag to avoid recursive DMA invocations. */
    int running;
};

#define PXA255_DMA_NUM_CHANNELS 16
#define PXA27X_DMA_NUM_CHANNELS 32

#define PXA2XX_DMA_NUM_REQUESTS 75

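/* Controller register map (offsets from the controller base address) */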
#define DCSR0   0x0000  /* DMA Control / Status register for Channel 0 */
#define DCSR31  0x007c  /* DMA Control / Status register for Channel 31 */
#define DALGN   0x00a0  /* DMA Alignment register */
#define DPCSR   0x00a4  /* DMA Programmed I/O Control Status register */
#define DRQSR0  0x00e0  /* DMA DREQ<0> Status register */
#define DRQSR1  0x00e4  /* DMA DREQ<1> Status register */
#define DRQSR2  0x00e8  /* DMA DREQ<2> Status register */
#define DINT    0x00f0  /* DMA Interrupt register */
#define DRCMR0  0x0100  /* Request to Channel Map register 0 */
#define DRCMR63 0x01fc  /* Request to Channel Map register 63 */
#define D_CH0   0x0200  /* Channel 0 Descriptor start */
#define DRCMR64 0x1100  /* Request to Channel Map register 64 */
#define DRCMR74 0x1128  /* Request to Channel Map register 74 */

/* Per-channel register */
#define DDADR   0x00
#define DSADR   0x01
#define DTADR   0x02
#define DCMD    0x03

/* Bit-field masks */
#define DRCMR_CHLNUM            0x1f
#define DRCMR_MAPVLD            (1 << 7)
#define DDADR_STOP              (1 << 0)
#define DDADR_BREN              (1 << 1)
#define DCMD_LEN                0x1fff
#define DCMD_WIDTH(x)           (1 << ((((x) >> 14) & 3) - 1))
#define DCMD_SIZE(x)            (4 << (((x) >> 16) & 3))
#define DCMD_FLYBYT             (1 << 19)
#define DCMD_FLYBYS             (1 << 20)
#define DCMD_ENDIRQEN           (1 << 21)
#define DCMD_STARTIRQEN         (1 << 22)
#define DCMD_CMPEN              (1 << 25)
#define DCMD_FLOWTRG            (1 << 28)
#define DCMD_FLOWSRC            (1 << 29)
#define DCMD_INCTRGADDR         (1 << 30)
#define DCMD_INCSRCADDR         (1 << 31)
#define DCSR_BUSERRINTR         (1 << 0)
#define DCSR_STARTINTR          (1 << 1)
#define DCSR_ENDINTR            (1 << 2)
#define DCSR_STOPINTR           (1 << 3)
#define DCSR_RASINTR            (1 << 4)
#define DCSR_REQPEND            (1 << 8)
#define DCSR_EORINT             (1 << 9)
#define DCSR_CMPST              (1 << 10)
#define DCSR_MASKRUN            (1 << 22)
#define DCSR_RASIRQEN           (1 << 23)
#define DCSR_CLRCMPST           (1 << 24)
#define DCSR_SETCMPST           (1 << 25)
#define DCSR_EORSTOPEN          (1 << 26)
#define DCSR_EORJMPEN           (1 << 27)
#define DCSR_EORIRQEN           (1 << 28)
#define DCSR_STOPIRQEN          (1 << 29)
#define DCSR_NODESCFETCH        (1 << 30)
#define DCSR_RUN                (1 << 31)

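/* Recompute the per-channel interrupt summary bits and update the outgoing
 * IRQ line.  A negative ch skips the per-channel bookkeeping and only
 * re-evaluates the IRQ level. */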
static inline void pxa2xx_dma_update(struct pxa2xx_dma_state_s *s, int ch)
{
    if (ch >= 0) {
        if ((s->chan[ch].state & DCSR_STOPIRQEN) &&
                (s->chan[ch].state & DCSR_STOPINTR))
            s->stopintr |= 1 << ch;
        else
            s->stopintr &= ~(1 << ch);

        if ((s->chan[ch].state & DCSR_EORIRQEN) &&
                (s->chan[ch].state & DCSR_EORINT))
            s->eorintr |= 1 << ch;
        else
            s->eorintr &= ~(1 << ch);

        if ((s->chan[ch].state & DCSR_RASIRQEN) &&
                (s->chan[ch].state & DCSR_RASINTR))
            s->rasintr |= 1 << ch;
        else
            s->rasintr &= ~(1 << ch);

        if (s->chan[ch].state & DCSR_STARTINTR)
            s->startintr |= 1 << ch;
        else
            s->startintr &= ~(1 << ch);

        if (s->chan[ch].state & DCSR_ENDINTR)
            s->endintr |= 1 << ch;
        else
            s->endintr &= ~(1 << ch);
    }

    if (s->stopintr | s->eorintr | s->rasintr | s->startintr | s->endintr)
        qemu_irq_raise(s->irq);
    else
        qemu_irq_lower(s->irq);
}

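/* Load the next transfer descriptor for channel ch from guest memory into
 * the channel registers (DDADR, DSADR, DTADR, DCMD). */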
static inline void pxa2xx_dma_descriptor_fetch(
                struct pxa2xx_dma_state_s *s, int ch)
{
    uint32_t desc[4];
    target_phys_addr_t daddr = s->chan[ch].descr & ~0xf;
    if ((s->chan[ch].descr & DDADR_BREN) && (s->chan[ch].state & DCSR_CMPST))
        daddr += 32;

    cpu_physical_memory_read(daddr, (uint8_t *) desc, 16);
    s->chan[ch].descr = desc[DDADR];
    s->chan[ch].src = desc[DSADR];
    s->chan[ch].dest = desc[DTADR];
    s->chan[ch].cmd = desc[DCMD];

    if (s->chan[ch].cmd & DCMD_FLOWSRC)
        s->chan[ch].src &= ~3;
    if (s->chan[ch].cmd & DCMD_FLOWTRG)
        s->chan[ch].dest &= ~3;

    if (s->chan[ch].cmd & (DCMD_CMPEN | DCMD_FLYBYS | DCMD_FLYBYT))
        printf("%s: unsupported mode in channel %i\n", __FUNCTION__, ch);

    if (s->chan[ch].cmd & DCMD_STARTIRQEN)
        s->chan[ch].state |= DCSR_STARTINTR;
}

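/* Execute pending transfers on all running channels.  The s->running
 * counter prevents reentrant invocation while a transfer is in progress. */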
static void pxa2xx_dma_run(struct pxa2xx_dma_state_s *s)
{
    int c, srcinc, destinc;
    uint32_t n, size;
    uint32_t width;
    uint32_t length;
    char buffer[32];
    struct pxa2xx_dma_channel_s *ch;

    if (s->running ++)
        return;

    while (s->running) {
        s->running = 1;
        for (c = 0; c < s->channels; c ++) {
            ch = &s->chan[c];

            while ((ch->state & DCSR_RUN) && !(ch->state & DCSR_STOPINTR)) {
                /* Test for pending requests */
                if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && !ch->request)
                    break;

                length = ch->cmd & DCMD_LEN;
                size = DCMD_SIZE(ch->cmd);
                width = DCMD_WIDTH(ch->cmd);

                srcinc = (ch->cmd & DCMD_INCSRCADDR) ? width : 0;
                destinc = (ch->cmd & DCMD_INCTRGADDR) ? width : 0;

                while (length) {
                    size = MIN(length, size);

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_read(ch->src, buffer + n, width);
                        ch->src += srcinc;
                    }

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_write(ch->dest, buffer + n, width);
                        ch->dest += destinc;
                    }

                    length -= size;

                    if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) &&
                            !ch->request) {
                        ch->state |= DCSR_EORINT;
                        if (ch->state & DCSR_EORSTOPEN)
                            ch->state |= DCSR_STOPINTR;
                        if ((ch->state & DCSR_EORJMPEN) &&
                                !(ch->state & DCSR_NODESCFETCH))
                            pxa2xx_dma_descriptor_fetch(s, c);
                        break;
                    }
                }

                ch->cmd = (ch->cmd & ~DCMD_LEN) | length;

                /* Is the transfer complete now? */
                if (!length) {
                    if (ch->cmd & DCMD_ENDIRQEN)
                        ch->state |= DCSR_ENDINTR;

                    if ((ch->state & DCSR_NODESCFETCH) ||
                            (ch->descr & DDADR_STOP) ||
                            (ch->state & DCSR_EORSTOPEN)) {
                        ch->state |= DCSR_STOPINTR;
                        ch->state &= ~DCSR_RUN;

                        break;
                    }

                    ch->state |= DCSR_STOPINTR;
                    break;
                }
            }
        }

        s->running --;
    }
}

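/* MMIO read handler for the DRCMR, DRQSR, DCSR, DINT, DALGN, DPCSR and
 * per-channel descriptor registers. */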
static uint32_t pxa2xx_dma_read(void *opaque, target_phys_addr_t offset)
{
    struct pxa2xx_dma_state_s *s = (struct pxa2xx_dma_state_s *) opaque;
    unsigned int channel;
    offset -= s->base;

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;
        return s->req[channel];

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        return 0;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        if (s->chan[channel].request)
            return s->chan[channel].state | DCSR_REQPEND;
        return s->chan[channel].state;

    case DINT:
        return s->stopintr | s->eorintr | s->rasintr |
                s->startintr | s->endintr;

    case DALGN:
        return s->align;

    case DPCSR:
        return s->pio;
    }

    if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
        channel = (offset - D_CH0) >> 4;
        switch ((offset & 0x0f) >> 2) {
        case DDADR:
            return s->chan[channel].descr;
        case DSADR:
            return s->chan[channel].src;
        case DTADR:
            return s->chan[channel].dest;
        case DCMD:
            return s->chan[channel].cmd;
        }
    }

    cpu_abort(cpu_single_env,
            "%s: Bad offset 0x" TARGET_FMT_plx "\n", __FUNCTION__, offset);
    return 7;
}

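/* MMIO write handler: a write to a DCSR register may start or stop a
 * channel and immediately runs any resulting transfer. */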
static void pxa2xx_dma_write(void *opaque,
                target_phys_addr_t offset, uint32_t value)
{
    struct pxa2xx_dma_state_s *s = (struct pxa2xx_dma_state_s *) opaque;
    unsigned int channel;
    offset -= s->base;

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;

        if (value & DRCMR_MAPVLD)
            if ((value & DRCMR_CHLNUM) > s->channels)
                cpu_abort(cpu_single_env, "%s: Bad DMA channel %i\n",
                        __FUNCTION__, value & DRCMR_CHLNUM);

        s->req[channel] = value;
        break;

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        /* Nothing to do */
        break;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        s->chan[channel].state &= 0x0000071f & ~(value &
                (DCSR_EORINT | DCSR_ENDINTR |
                 DCSR_STARTINTR | DCSR_BUSERRINTR));
        s->chan[channel].state |= value & 0xfc800000;

        if (s->chan[channel].state & DCSR_STOPIRQEN)
            s->chan[channel].state &= ~DCSR_STOPINTR;

        if (value & DCSR_NODESCFETCH) {
            /* No-descriptor-fetch mode */
            if (value & DCSR_RUN) {
                s->chan[channel].state &= ~DCSR_STOPINTR;
                pxa2xx_dma_run(s);
            }
        } else {
            /* Descriptor-fetch mode */
            if (value & DCSR_RUN) {
                s->chan[channel].state &= ~DCSR_STOPINTR;
                pxa2xx_dma_descriptor_fetch(s, channel);
                pxa2xx_dma_run(s);
            }
        }

        /* Shouldn't matter as our DMA is synchronous. */
        if (!(value & (DCSR_RUN | DCSR_MASKRUN)))
            s->chan[channel].state |= DCSR_STOPINTR;

        if (value & DCSR_CLRCMPST)
            s->chan[channel].state &= ~DCSR_CMPST;
        if (value & DCSR_SETCMPST)
            s->chan[channel].state |= DCSR_CMPST;

        pxa2xx_dma_update(s, channel);
        break;

    case DALGN:
        s->align = value;
        break;

    case DPCSR:
        s->pio = value & 0x80000001;
        break;

    default:
        if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
            channel = (offset - D_CH0) >> 4;
            switch ((offset & 0x0f) >> 2) {
            case DDADR:
                s->chan[channel].descr = value;
                break;
            case DSADR:
                s->chan[channel].src = value;
                break;
            case DTADR:
                s->chan[channel].dest = value;
                break;
            case DCMD:
                s->chan[channel].cmd = value;
                break;
            default:
                goto fail;
            }

            break;
        }

    fail:
        cpu_abort(cpu_single_env, "%s: Bad offset " TARGET_FMT_plx "\n",
                __FUNCTION__, offset);
    }
}

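/* Only 32-bit accesses are supported; narrower accesses abort. */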
static uint32_t pxa2xx_dma_readbad(void *opaque, target_phys_addr_t offset)
{
    cpu_abort(cpu_single_env, "%s: Bad access width\n", __FUNCTION__);
    return 5;
}

static void pxa2xx_dma_writebad(void *opaque,
                target_phys_addr_t offset, uint32_t value)
{
    cpu_abort(cpu_single_env, "%s: Bad access width\n", __FUNCTION__);
}

static CPUReadMemoryFunc *pxa2xx_dma_readfn[] = {
    pxa2xx_dma_readbad,
    pxa2xx_dma_readbad,
    pxa2xx_dma_read
};

static CPUWriteMemoryFunc *pxa2xx_dma_writefn[] = {
    pxa2xx_dma_writebad,
    pxa2xx_dma_writebad,
    pxa2xx_dma_write
};

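/* Snapshot support: save and restore the interrupt summary words, the
 * request map and the per-channel registers. */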
static void pxa2xx_dma_save(QEMUFile *f, void *opaque)
{
    struct pxa2xx_dma_state_s *s = (struct pxa2xx_dma_state_s *) opaque;
    int i;

    qemu_put_be32(f, s->channels);

    qemu_put_be32s(f, &s->stopintr);
    qemu_put_be32s(f, &s->eorintr);
    qemu_put_be32s(f, &s->rasintr);
    qemu_put_be32s(f, &s->startintr);
    qemu_put_be32s(f, &s->endintr);
    qemu_put_be32s(f, &s->align);
    qemu_put_be32s(f, &s->pio);

    qemu_put_buffer(f, s->req, PXA2XX_DMA_NUM_REQUESTS);
    for (i = 0; i < s->channels; i ++) {
        qemu_put_betl(f, s->chan[i].descr);
        qemu_put_betl(f, s->chan[i].src);
        qemu_put_betl(f, s->chan[i].dest);
        qemu_put_be32s(f, &s->chan[i].cmd);
        qemu_put_be32s(f, &s->chan[i].state);
        qemu_put_be32(f, s->chan[i].request);
    }
}

static int pxa2xx_dma_load(QEMUFile *f, void *opaque, int version_id)
{
    struct pxa2xx_dma_state_s *s = (struct pxa2xx_dma_state_s *) opaque;
    int i;

    if (qemu_get_be32(f) != s->channels)
        return -EINVAL;

    qemu_get_be32s(f, &s->stopintr);
    qemu_get_be32s(f, &s->eorintr);
    qemu_get_be32s(f, &s->rasintr);
    qemu_get_be32s(f, &s->startintr);
    qemu_get_be32s(f, &s->endintr);
    qemu_get_be32s(f, &s->align);
    qemu_get_be32s(f, &s->pio);

    qemu_get_buffer(f, s->req, PXA2XX_DMA_NUM_REQUESTS);
    for (i = 0; i < s->channels; i ++) {
        s->chan[i].descr = qemu_get_betl(f);
        s->chan[i].src = qemu_get_betl(f);
        s->chan[i].dest = qemu_get_betl(f);
        qemu_get_be32s(f, &s->chan[i].cmd);
        qemu_get_be32s(f, &s->chan[i].state);
        s->chan[i].request = qemu_get_be32(f);
    }

    return 0;
}

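/* Common initialisation: allocate the controller state, register the
 * 64 KiB MMIO region and hook up savevm. */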
static struct pxa2xx_dma_state_s *pxa2xx_dma_init(target_phys_addr_t base,
                qemu_irq irq, int channels)
{
    int i, iomemtype;
    struct pxa2xx_dma_state_s *s;
    s = (struct pxa2xx_dma_state_s *)
            qemu_mallocz(sizeof(struct pxa2xx_dma_state_s));

    s->channels = channels;
    s->chan = qemu_mallocz(sizeof(struct pxa2xx_dma_channel_s) * s->channels);
    s->base = base;
    s->irq = irq;
    s->handler = (pxa2xx_dma_handler_t) pxa2xx_dma_request;
    s->req = qemu_mallocz(sizeof(uint8_t) * PXA2XX_DMA_NUM_REQUESTS);

    memset(s->chan, 0, sizeof(struct pxa2xx_dma_channel_s) * s->channels);
    for (i = 0; i < s->channels; i ++)
        s->chan[i].state = DCSR_STOPINTR;

    memset(s->req, 0, sizeof(uint8_t) * PXA2XX_DMA_NUM_REQUESTS);

    iomemtype = cpu_register_io_memory(0, pxa2xx_dma_readfn,
                    pxa2xx_dma_writefn, s);
    cpu_register_physical_memory(base, 0x00010000, iomemtype);

    register_savevm("pxa2xx_dma", 0, 0, pxa2xx_dma_save, pxa2xx_dma_load, s);

    return s;
}

struct pxa2xx_dma_state_s *pxa27x_dma_init(target_phys_addr_t base,
                qemu_irq irq)
{
    return pxa2xx_dma_init(base, irq, PXA27X_DMA_NUM_CHANNELS);
}

struct pxa2xx_dma_state_s *pxa255_dma_init(target_phys_addr_t base,
                qemu_irq irq)
{
    return pxa2xx_dma_init(base, irq, PXA255_DMA_NUM_CHANNELS);
}

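/* Peripheral DMA request line (DREQ) handler: map the request number
 * through DRCMR and run the bound channel when the request is asserted. */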
void pxa2xx_dma_request(struct pxa2xx_dma_state_s *s, int req_num, int on)
{
    int ch;
    if (req_num < 0 || req_num >= PXA2XX_DMA_NUM_REQUESTS)
        cpu_abort(cpu_single_env,
                "%s: Bad DMA request %i\n", __FUNCTION__, req_num);

    if (!(s->req[req_num] & DRCMR_MAPVLD))
        return;
    ch = s->req[req_num] & DRCMR_CHLNUM;

    if (!s->chan[ch].request && on)
        s->chan[ch].state |= DCSR_RASINTR;
    else
        s->chan[ch].state &= ~DCSR_RASINTR;
    if (s->chan[ch].request && !on)
        s->chan[ch].state |= DCSR_EORINT;

    s->chan[ch].request = on;
    if (on) {
        pxa2xx_dma_run(s);
        pxa2xx_dma_update(s, ch);
    }
}