/*
 * Intel XScale PXA255/270 DMA controller.
 *
 * Copyright (c) 2006 Openedhand Ltd.
 * Copyright (c) 2006 Thorsten Zitterell
 * Written by Andrzej Zaborowski <balrog@zabor.org>
 *
 * This code is licensed under the GPL.
 */

#include "hw.h"
#include "pxa.h"
typedef struct {
    target_phys_addr_t descr;
    target_phys_addr_t src;
    target_phys_addr_t dest;
    uint32_t cmd;
    uint32_t state;
    int request;
} PXA2xxDMAChannel;
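
/*
 * Controller state: one bit per channel in each of the five per-cause
 * interrupt latches, the DALGN and DPCSR register values, the channel
 * array and the DRCMR request-to-channel map.
 */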
struct PXA2xxDMAState {
    qemu_irq irq;

    uint32_t stopintr;
    uint32_t eorintr;
    uint32_t rasintr;
    uint32_t startintr;
    uint32_t endintr;

    uint32_t align;
    uint32_t pio;

    int channels;
    PXA2xxDMAChannel *chan;

    uint8_t *req;

    /* Flag to avoid recursive DMA invocations.  */
    int running;
};

#define PXA255_DMA_NUM_CHANNELS        16
#define PXA27X_DMA_NUM_CHANNELS        32

#define PXA2XX_DMA_NUM_REQUESTS        75
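
/* Register offsets within the controller's 64 KiB MMIO window */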
#define DCSR0        0x0000        /* DMA Control / Status register for Channel 0 */
#define DCSR31       0x007c        /* DMA Control / Status register for Channel 31 */
#define DALGN        0x00a0        /* DMA Alignment register */
#define DPCSR        0x00a4        /* DMA Programmed I/O Control Status register */
#define DRQSR0       0x00e0        /* DMA DREQ<0> Status register */
#define DRQSR1       0x00e4        /* DMA DREQ<1> Status register */
#define DRQSR2       0x00e8        /* DMA DREQ<2> Status register */
#define DINT         0x00f0        /* DMA Interrupt register */
#define DRCMR0       0x0100        /* Request to Channel Map register 0 */
#define DRCMR63      0x01fc        /* Request to Channel Map register 63 */
#define D_CH0        0x0200        /* Channel 0 Descriptor start */
#define DRCMR64      0x1100        /* Request to Channel Map register 64 */
#define DRCMR74      0x1128        /* Request to Channel Map register 74 */

/* Per-channel registers (word index within a channel's 16-byte block) */
#define DDADR        0x00
#define DSADR        0x01
#define DTADR        0x02
#define DCMD         0x03

/* Bit-field masks */
#define DRCMR_CHLNUM            0x1f
#define DRCMR_MAPVLD            (1 << 7)
#define DDADR_STOP              (1 << 0)
#define DDADR_BREN              (1 << 1)
#define DCMD_LEN                0x1fff
#define DCMD_WIDTH(x)           (1 << ((((x) >> 14) & 3) - 1))
#define DCMD_SIZE(x)            (4 << (((x) >> 16) & 3))
#define DCMD_FLYBYT             (1 << 19)
#define DCMD_FLYBYS             (1 << 20)
#define DCMD_ENDIRQEN           (1 << 21)
#define DCMD_STARTIRQEN         (1 << 22)
#define DCMD_CMPEN              (1 << 25)
#define DCMD_FLOWTRG            (1 << 28)
#define DCMD_FLOWSRC            (1 << 29)
#define DCMD_INCTRGADDR         (1 << 30)
#define DCMD_INCSRCADDR         (1 << 31)
#define DCSR_BUSERRINTR         (1 << 0)
#define DCSR_STARTINTR          (1 << 1)
#define DCSR_ENDINTR            (1 << 2)
#define DCSR_STOPINTR           (1 << 3)
#define DCSR_RASINTR            (1 << 4)
#define DCSR_REQPEND            (1 << 8)
#define DCSR_EORINT             (1 << 9)
#define DCSR_CMPST              (1 << 10)
#define DCSR_MASKRUN            (1 << 22)
#define DCSR_RASIRQEN           (1 << 23)
#define DCSR_CLRCMPST           (1 << 24)
#define DCSR_SETCMPST           (1 << 25)
#define DCSR_EORSTOPEN          (1 << 26)
#define DCSR_EORJMPEN           (1 << 27)
#define DCSR_EORIRQEN           (1 << 28)
#define DCSR_STOPIRQEN          (1 << 29)
#define DCSR_NODESCFETCH        (1 << 30)
#define DCSR_RUN                (1 << 31)
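
/*
 * Recompute the per-cause interrupt bitmaps for channel ch (no channel
 * if ch < 0) and raise or lower the shared DMA interrupt line to match.
 */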
static inline void pxa2xx_dma_update(PXA2xxDMAState *s, int ch)
{
    if (ch >= 0) {
        if ((s->chan[ch].state & DCSR_STOPIRQEN) &&
                (s->chan[ch].state & DCSR_STOPINTR))
            s->stopintr |= 1 << ch;
        else
            s->stopintr &= ~(1 << ch);

        if ((s->chan[ch].state & DCSR_EORIRQEN) &&
                (s->chan[ch].state & DCSR_EORINT))
            s->eorintr |= 1 << ch;
        else
            s->eorintr &= ~(1 << ch);

        if ((s->chan[ch].state & DCSR_RASIRQEN) &&
                (s->chan[ch].state & DCSR_RASINTR))
            s->rasintr |= 1 << ch;
        else
            s->rasintr &= ~(1 << ch);

        if (s->chan[ch].state & DCSR_STARTINTR)
            s->startintr |= 1 << ch;
        else
            s->startintr &= ~(1 << ch);

        if (s->chan[ch].state & DCSR_ENDINTR)
            s->endintr |= 1 << ch;
        else
            s->endintr &= ~(1 << ch);
    }

    if (s->stopintr | s->eorintr | s->rasintr | s->startintr | s->endintr)
        qemu_irq_raise(s->irq);
    else
        qemu_irq_lower(s->irq);
}
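
/*
 * Fetch the next 16-byte descriptor (DDADR, DSADR, DTADR, DCMD) from guest
 * memory into the channel registers.  When descriptor branching is enabled
 * (DDADR_BREN) and the compare status bit is set, the alternate descriptor
 * 32 bytes further on is loaded instead.
 */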
static inline void pxa2xx_dma_descriptor_fetch(
                PXA2xxDMAState *s, int ch)
{
    uint32_t desc[4];
    target_phys_addr_t daddr = s->chan[ch].descr & ~0xf;
    if ((s->chan[ch].descr & DDADR_BREN) && (s->chan[ch].state & DCSR_CMPST))
        daddr += 32;

    cpu_physical_memory_read(daddr, (uint8_t *) desc, 16);
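    /* The descriptor words are little-endian in guest memory and are used
     * as host uint32_t values here; a big-endian host would need to
     * byte-swap them (le32_to_cpu) before use. */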
    s->chan[ch].descr = desc[DDADR];
    s->chan[ch].src = desc[DSADR];
    s->chan[ch].dest = desc[DTADR];
    s->chan[ch].cmd = desc[DCMD];

    if (s->chan[ch].cmd & DCMD_FLOWSRC)
        s->chan[ch].src &= ~3;
    if (s->chan[ch].cmd & DCMD_FLOWTRG)
        s->chan[ch].dest &= ~3;

    if (s->chan[ch].cmd & (DCMD_CMPEN | DCMD_FLYBYS | DCMD_FLYBYT))
        printf("%s: unsupported mode in channel %i\n", __FUNCTION__, ch);

    if (s->chan[ch].cmd & DCMD_STARTIRQEN)
        s->chan[ch].state |= DCSR_STARTINTR;
}
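
/*
 * Main transfer loop: service every channel that is running and not
 * stopped, moving one burst (at most DCMD_SIZE, i.e. up to 32 bytes) at a
 * time through a bounce buffer until the DCMD length field reaches zero
 * or a flow-controlled channel loses its request.  s->running guards
 * against reentrant invocation, since memory accesses may re-trigger DMA.
 */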
static void pxa2xx_dma_run(PXA2xxDMAState *s)
{
    int c, srcinc, destinc;
    uint32_t n, size;
    uint32_t width;
    uint32_t length;
    uint8_t buffer[32];
    PXA2xxDMAChannel *ch;

    if (s->running ++)
        return;

    while (s->running) {
        s->running = 1;
        for (c = 0; c < s->channels; c ++) {
            ch = &s->chan[c];

            while ((ch->state & DCSR_RUN) && !(ch->state & DCSR_STOPINTR)) {
                /* Test for pending requests */
                if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && !ch->request)
                    break;

                length = ch->cmd & DCMD_LEN;
                size = DCMD_SIZE(ch->cmd);
                width = DCMD_WIDTH(ch->cmd);

                srcinc = (ch->cmd & DCMD_INCSRCADDR) ? width : 0;
                destinc = (ch->cmd & DCMD_INCTRGADDR) ? width : 0;

                while (length) {
                    size = MIN(length, size);

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_read(ch->src, buffer + n, width);
                        ch->src += srcinc;
                    }

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_write(ch->dest, buffer + n, width);
                        ch->dest += destinc;
                    }

                    length -= size;

                    if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) &&
                            !ch->request) {
                        ch->state |= DCSR_EORINT;
                        if (ch->state & DCSR_EORSTOPEN)
                            ch->state |= DCSR_STOPINTR;
                        if ((ch->state & DCSR_EORJMPEN) &&
                                        !(ch->state & DCSR_NODESCFETCH))
                            pxa2xx_dma_descriptor_fetch(s, c);
                        break;
                    }
                }

                ch->cmd = (ch->cmd & ~DCMD_LEN) | length;

                /* Is the transfer complete now? */
                if (!length) {
                    if (ch->cmd & DCMD_ENDIRQEN)
                        ch->state |= DCSR_ENDINTR;

                    if ((ch->state & DCSR_NODESCFETCH) ||
                                (ch->descr & DDADR_STOP) ||
                                (ch->state & DCSR_EORSTOPEN)) {
                        ch->state |= DCSR_STOPINTR;
                        ch->state &= ~DCSR_RUN;

                        break;
                    }

                    ch->state |= DCSR_STOPINTR;
                    break;
                }
            }
        }

        s->running --;
    }
}
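
/*
 * MMIO read handler.  DRCMR64-DRCMR74 live at 0x1100 upwards, so their
 * offsets are first folded back onto the contiguous DRCMR0 range before
 * the request map is indexed.
 */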
static uint32_t pxa2xx_dma_read(void *opaque, target_phys_addr_t offset)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    unsigned int channel;

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;
        return s->req[channel];

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        return 0;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        if (s->chan[channel].request)
            return s->chan[channel].state | DCSR_REQPEND;
        return s->chan[channel].state;

    case DINT:
        return s->stopintr | s->eorintr | s->rasintr |
                s->startintr | s->endintr;

    case DALGN:
        return s->align;

    case DPCSR:
        return s->pio;
    }

    if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
        channel = (offset - D_CH0) >> 4;
        switch ((offset & 0x0f) >> 2) {
        case DDADR:
            return s->chan[channel].descr;
        case DSADR:
            return s->chan[channel].src;
        case DTADR:
            return s->chan[channel].dest;
        case DCMD:
            return s->chan[channel].cmd;
        }
    }

    hw_error("%s: Bad offset 0x" TARGET_FMT_plx "\n", __FUNCTION__, offset);
    return 7;
}
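
/*
 * MMIO write handler.  In the DCSR registers, EORINT, ENDINTR, STARTINTR
 * and BUSERRINTR are write-one-to-clear status bits (masked off below),
 * while the high control bits are written through directly, mirroring the
 * hardware behaviour.
 */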
static void pxa2xx_dma_write(void *opaque,
                 target_phys_addr_t offset, uint32_t value)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    unsigned int channel;

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;

        if (value & DRCMR_MAPVLD)
            if ((value & DRCMR_CHLNUM) >= s->channels)
                hw_error("%s: Bad DMA channel %i\n",
                         __FUNCTION__, value & DRCMR_CHLNUM);

        s->req[channel] = value;
        break;

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        /* Nothing to do */
        break;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        s->chan[channel].state &= 0x0000071f & ~(value &
                        (DCSR_EORINT | DCSR_ENDINTR |
                         DCSR_STARTINTR | DCSR_BUSERRINTR));
        s->chan[channel].state |= value & 0xfc800000;

        if (s->chan[channel].state & DCSR_STOPIRQEN)
            s->chan[channel].state &= ~DCSR_STOPINTR;

        if (value & DCSR_NODESCFETCH) {
            /* No-descriptor-fetch mode */
            if (value & DCSR_RUN) {
                s->chan[channel].state &= ~DCSR_STOPINTR;
                pxa2xx_dma_run(s);
            }
        } else {
            /* Descriptor-fetch mode */
            if (value & DCSR_RUN) {
                s->chan[channel].state &= ~DCSR_STOPINTR;
                pxa2xx_dma_descriptor_fetch(s, channel);
                pxa2xx_dma_run(s);
            }
        }

        /* Shouldn't matter as our DMA is synchronous.  */
        if (!(value & (DCSR_RUN | DCSR_MASKRUN)))
            s->chan[channel].state |= DCSR_STOPINTR;

        if (value & DCSR_CLRCMPST)
            s->chan[channel].state &= ~DCSR_CMPST;
        if (value & DCSR_SETCMPST)
            s->chan[channel].state |= DCSR_CMPST;

        pxa2xx_dma_update(s, channel);
        break;

    case DALGN:
        s->align = value;
        break;

    case DPCSR:
        s->pio = value & 0x80000001;
        break;

    default:
        if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
            channel = (offset - D_CH0) >> 4;
            switch ((offset & 0x0f) >> 2) {
            case DDADR:
                s->chan[channel].descr = value;
                break;
            case DSADR:
                s->chan[channel].src = value;
                break;
            case DTADR:
                s->chan[channel].dest = value;
                break;
            case DCMD:
                s->chan[channel].cmd = value;
                break;
            default:
                goto fail;
            }

            break;
        }
    fail:
        hw_error("%s: Bad offset 0x" TARGET_FMT_plx "\n", __FUNCTION__, offset);
    }
}
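
/*
 * The controller only accepts 32-bit accesses; the byte and halfword
 * slots of the function tables below trap into the *bad handlers.
 */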
static uint32_t pxa2xx_dma_readbad(void *opaque, target_phys_addr_t offset)
{
    hw_error("%s: Bad access width\n", __FUNCTION__);
    return 5;
}

static void pxa2xx_dma_writebad(void *opaque,
                 target_phys_addr_t offset, uint32_t value)
{
    hw_error("%s: Bad access width\n", __FUNCTION__);
}

static CPUReadMemoryFunc * const pxa2xx_dma_readfn[] = {
    pxa2xx_dma_readbad,
    pxa2xx_dma_readbad,
    pxa2xx_dma_read
};

static CPUWriteMemoryFunc * const pxa2xx_dma_writefn[] = {
    pxa2xx_dma_writebad,
    pxa2xx_dma_writebad,
    pxa2xx_dma_write
};
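
/*
 * Save/restore.  The channel count is written first and doubles as a
 * format check on load; the remaining fields follow in a fixed order.
 */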
static void pxa2xx_dma_save(QEMUFile *f, void *opaque)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    int i;

    qemu_put_be32(f, s->channels);

    qemu_put_be32s(f, &s->stopintr);
    qemu_put_be32s(f, &s->eorintr);
    qemu_put_be32s(f, &s->rasintr);
    qemu_put_be32s(f, &s->startintr);
    qemu_put_be32s(f, &s->endintr);
    qemu_put_be32s(f, &s->align);
    qemu_put_be32s(f, &s->pio);

    qemu_put_buffer(f, s->req, PXA2XX_DMA_NUM_REQUESTS);
    for (i = 0; i < s->channels; i ++) {
        qemu_put_betl(f, s->chan[i].descr);
        qemu_put_betl(f, s->chan[i].src);
        qemu_put_betl(f, s->chan[i].dest);
        qemu_put_be32s(f, &s->chan[i].cmd);
        qemu_put_be32s(f, &s->chan[i].state);
        qemu_put_be32(f, s->chan[i].request);
    }
}

static int pxa2xx_dma_load(QEMUFile *f, void *opaque, int version_id)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    int i;

    if (qemu_get_be32(f) != s->channels)
        return -EINVAL;

    qemu_get_be32s(f, &s->stopintr);
    qemu_get_be32s(f, &s->eorintr);
    qemu_get_be32s(f, &s->rasintr);
    qemu_get_be32s(f, &s->startintr);
    qemu_get_be32s(f, &s->endintr);
    qemu_get_be32s(f, &s->align);
    qemu_get_be32s(f, &s->pio);

    qemu_get_buffer(f, s->req, PXA2XX_DMA_NUM_REQUESTS);
    for (i = 0; i < s->channels; i ++) {
        s->chan[i].descr = qemu_get_betl(f);
        s->chan[i].src = qemu_get_betl(f);
        s->chan[i].dest = qemu_get_betl(f);
        qemu_get_be32s(f, &s->chan[i].cmd);
        qemu_get_be32s(f, &s->chan[i].state);
        s->chan[i].request = qemu_get_be32(f);
    }

    return 0;
}
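
/*
 * Common initialisation: allocate the channel and request-map state,
 * mark every channel stopped, map the 64 KiB register window at base
 * and register the device for snapshotting.
 */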
static PXA2xxDMAState *pxa2xx_dma_init(target_phys_addr_t base,
                qemu_irq irq, int channels)
{
    int i, iomemtype;
    PXA2xxDMAState *s;
    s = (PXA2xxDMAState *) qemu_mallocz(sizeof(PXA2xxDMAState));

    s->channels = channels;
    s->chan = qemu_mallocz(sizeof(PXA2xxDMAChannel) * s->channels);
    s->irq = irq;
    s->req = qemu_mallocz(sizeof(uint8_t) * PXA2XX_DMA_NUM_REQUESTS);

    /* qemu_mallocz() returns zeroed memory, so only the non-zero reset
     * state needs to be set up explicitly.  */
    for (i = 0; i < s->channels; i ++)
        s->chan[i].state = DCSR_STOPINTR;

    iomemtype = cpu_register_io_memory(pxa2xx_dma_readfn,
                    pxa2xx_dma_writefn, s, DEVICE_NATIVE_ENDIAN);
    cpu_register_physical_memory(base, 0x00010000, iomemtype);

    register_savevm(NULL, "pxa2xx_dma", 0, 0,
                    pxa2xx_dma_save, pxa2xx_dma_load, s);

    return s;
}
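
/* Instantiate the controller with the channel count of each SoC family. */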
PXA2xxDMAState *pxa27x_dma_init(target_phys_addr_t base,
                qemu_irq irq)
{
    return pxa2xx_dma_init(base, irq, PXA27X_DMA_NUM_CHANNELS);
}

PXA2xxDMAState *pxa255_dma_init(target_phys_addr_t base,
                qemu_irq irq)
{
    return pxa2xx_dma_init(base, irq, PXA255_DMA_NUM_CHANNELS);
}
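
/*
 * Peripheral request line handler: map DREQ number req_num through DRCMR
 * and assert or deassert the bound channel's request, kicking the
 * transfer loop when a request is asserted.
 */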
void pxa2xx_dma_request(PXA2xxDMAState *s, int req_num, int on)
{
    int ch;
    if (req_num < 0 || req_num >= PXA2XX_DMA_NUM_REQUESTS)
        hw_error("%s: Bad DMA request %i\n", __FUNCTION__, req_num);

    if (!(s->req[req_num] & DRCMR_MAPVLD))
        return;
    ch = s->req[req_num] & DRCMR_CHLNUM;

    if (!s->chan[ch].request && on)
        s->chan[ch].state |= DCSR_RASINTR;
    else
        s->chan[ch].state &= ~DCSR_RASINTR;
    if (s->chan[ch].request && !on)
        s->chan[ch].state |= DCSR_EORINT;

    s->chan[ch].request = on;
    if (on) {
        pxa2xx_dma_run(s);
        pxa2xx_dma_update(s, ch);
    }
}