hw/pl080.c @ b55266b5

/*
 * Arm PrimeCell PL080/PL081 DMA controller
 *
 * Copyright (c) 2006 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licenced under the GPL.
 */

#include "hw.h"
#include "primecell.h"

#define PL080_MAX_CHANNELS 8
#define PL080_CONF_E    0x1
#define PL080_CONF_M1   0x2
#define PL080_CONF_M2   0x4

#define PL080_CCONF_H   0x40000
#define PL080_CCONF_A   0x20000
#define PL080_CCONF_L   0x10000
#define PL080_CCONF_ITC 0x08000
#define PL080_CCONF_IE  0x04000
#define PL080_CCONF_E   0x00001

#define PL080_CCTRL_I   0x80000000
#define PL080_CCTRL_DI  0x08000000
#define PL080_CCTRL_SI  0x04000000
#define PL080_CCTRL_D   0x02000000
#define PL080_CCTRL_S   0x01000000

typedef struct {
    uint32_t src;
    uint32_t dest;
    uint32_t lli;
    uint32_t ctrl;
    uint32_t conf;
} pl080_channel;

typedef struct {
    uint32_t base;
    uint8_t tc_int;
    uint8_t tc_mask;
    uint8_t err_int;
    uint8_t err_mask;
    uint32_t conf;
    uint32_t sync;
    uint32_t req_single;
    uint32_t req_burst;
    pl080_channel chan[PL080_MAX_CHANNELS];
    int nchannels;
    /* Flag to avoid recursive DMA invocations.  */
    int running;
    qemu_irq irq;
} pl080_state;

static const unsigned char pl080_id[] =
{ 0x80, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };

static const unsigned char pl081_id[] =
{ 0x81, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };

static void pl080_update(pl080_state *s)
{
    if ((s->tc_int & s->tc_mask)
            || (s->err_int & s->err_mask))
        qemu_irq_raise(s->irq);
    else
        qemu_irq_lower(s->irq);
}

static void pl080_run(pl080_state *s)
{
    int c;
    int flow;
    pl080_channel *ch;
    int swidth;
    int dwidth;
    int xsize;
    int n;
    int src_id;
    int dest_id;
    int size;
    uint8_t buff[4];
    uint32_t req;

    s->tc_mask = 0;
    for (c = 0; c < s->nchannels; c++) {
        if (s->chan[c].conf & PL080_CCONF_ITC)
            s->tc_mask |= 1 << c;
        if (s->chan[c].conf & PL080_CCONF_IE)
            s->err_mask |= 1 << c;
    }

    if ((s->conf & PL080_CONF_E) == 0)
        return;

cpu_abort(cpu_single_env, "DMA active\n");
    /* If we are already in the middle of a DMA operation then indicate that
       there may be new DMA requests and return immediately.  */
    if (s->running) {
        s->running++;
        return;
    }
    s->running = 1;
    while (s->running) {
        for (c = 0; c < s->nchannels; c++) {
            ch = &s->chan[c];
again:
            /* Test if this channel has any pending DMA requests.  */
            if ((ch->conf & (PL080_CCONF_H | PL080_CCONF_E))
                    != PL080_CCONF_E)
                continue;
            flow = (ch->conf >> 11) & 7;
            if (flow >= 4) {
                cpu_abort(cpu_single_env,
                    "pl080_run: Peripheral flow control not implemented\n");
            }
            src_id = (ch->conf >> 1) & 0x1f;
            dest_id = (ch->conf >> 6) & 0x1f;
            size = ch->ctrl & 0xfff;
            req = s->req_single | s->req_burst;
            switch (flow) {
            case 0:
                break;
            case 1:
                if ((req & (1u << dest_id)) == 0)
                    size = 0;
                break;
            case 2:
                if ((req & (1u << src_id)) == 0)
                    size = 0;
                break;
            case 3:
                if ((req & (1u << src_id)) == 0
                        || (req & (1u << dest_id)) == 0)
                    size = 0;
                break;
            }
            if (!size)
                continue;

            /* Transfer one element.  */
            /* ??? Should transfer multiple elements for a burst request.  */
            /* ??? Unclear what the proper behavior is when source and
               destination widths are different.  */
            swidth = 1 << ((ch->ctrl >> 18) & 7);
            dwidth = 1 << ((ch->ctrl >> 21) & 7);
            for (n = 0; n < dwidth; n += swidth) {
                cpu_physical_memory_read(ch->src, buff + n, swidth);
                if (ch->ctrl & PL080_CCTRL_SI)
                    ch->src += swidth;
            }
            xsize = (dwidth < swidth) ? swidth : dwidth;
            /* ??? This may pad the value incorrectly for dwidth < 32.  */
            for (n = 0; n < xsize; n += dwidth) {
                cpu_physical_memory_write(ch->dest + n, buff + n, dwidth);
                if (ch->ctrl & PL080_CCTRL_DI)
                    ch->dest += dwidth;
            }

            size--;
            ch->ctrl = (ch->ctrl & 0xfffff000) | size;
            if (size == 0) {
                /* Transfer complete.  */
                if (ch->lli) {
                    ch->src = ldl_phys(ch->lli);
                    ch->dest = ldl_phys(ch->lli + 4);
                    ch->ctrl = ldl_phys(ch->lli + 12);
                    ch->lli = ldl_phys(ch->lli + 8);
                } else {
                    ch->conf &= ~PL080_CCONF_E;
                }
                if (ch->ctrl & PL080_CCTRL_I) {
                    s->tc_int |= 1 << c;
                }
            }
            goto again;
        }
        if (--s->running)
            s->running = 1;
    }
}

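/* For reference, the linked-list items (LLIs) that pl080_run follows have the
   guest-memory layout sketched below.  This is inferred from the ldl_phys()
   offsets above (lli + 0/4/8/12); the struct and field names are illustrative
   only and are not used anywhere in this file.  */
#if 0
typedef struct {
    uint32_t src;   /* next source address (lli + 0) */
    uint32_t dest;  /* next destination address (lli + 4) */
    uint32_t next;  /* pointer to the next LLI, 0 ends the chain (lli + 8) */
    uint32_t ctrl;  /* next channel control value (lli + 12) */
} pl080_lli;
#endif
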
static uint32_t pl080_read(void *opaque, target_phys_addr_t offset)
{
    pl080_state *s = (pl080_state *)opaque;
    uint32_t i;
    uint32_t mask;

    offset -= s->base;
    if (offset >= 0xfe0 && offset < 0x1000) {
        if (s->nchannels == 8) {
            return pl080_id[(offset - 0xfe0) >> 2];
        } else {
            return pl081_id[(offset - 0xfe0) >> 2];
        }
    }
    if (offset >= 0x100 && offset < 0x200) {
        /* Per-channel registers: 0x20 bytes per channel, so the register
           index is bits [4:2] of the offset.  */
        i = (offset & 0xe0) >> 5;
        if (i >= s->nchannels)
            goto bad_offset;
        switch ((offset >> 2) & 7) {
        case 0: /* SrcAddr */
            return s->chan[i].src;
        case 1: /* DestAddr */
            return s->chan[i].dest;
        case 2: /* LLI */
            return s->chan[i].lli;
        case 3: /* Control */
            return s->chan[i].ctrl;
        case 4: /* Configuration */
            return s->chan[i].conf;
        default:
            goto bad_offset;
        }
    }
    switch (offset >> 2) {
    case 0: /* IntStatus */
        return (s->tc_int & s->tc_mask) | (s->err_int & s->err_mask);
    case 1: /* IntTCStatus */
        return (s->tc_int & s->tc_mask);
    case 3: /* IntErrorStatus */
        return (s->err_int & s->err_mask);
    case 5: /* RawIntTCStatus */
        return s->tc_int;
    case 6: /* RawIntErrorStatus */
        return s->err_int;
    case 7: /* EnbldChns */
        mask = 0;
        for (i = 0; i < s->nchannels; i++) {
            if (s->chan[i].conf & PL080_CCONF_E)
                mask |= 1 << i;
        }
        return mask;
    case 8: /* SoftBReq */
    case 9: /* SoftSReq */
    case 10: /* SoftLBReq */
    case 11: /* SoftLSReq */
        /* ??? Implement these.  */
        return 0;
    case 12: /* Configuration */
        return s->conf;
    case 13: /* Sync */
        return s->sync;
    default:
    bad_offset:
        cpu_abort(cpu_single_env, "pl080_read: Bad offset %x\n", (int)offset);
        return 0;
    }
}

static void pl080_write(void *opaque, target_phys_addr_t offset,
                          uint32_t value)
{
    pl080_state *s = (pl080_state *)opaque;
    int i;

    offset -= s->base;
    if (offset >= 0x100 && offset < 0x200) {
        /* Per-channel registers: see pl080_read for the decoding.  */
        i = (offset & 0xe0) >> 5;
        if (i >= s->nchannels)
            goto bad_offset;
        switch ((offset >> 2) & 7) {
        case 0: /* SrcAddr */
            s->chan[i].src = value;
            break;
        case 1: /* DestAddr */
            s->chan[i].dest = value;
            break;
        case 2: /* LLI */
            s->chan[i].lli = value;
            break;
        case 3: /* Control */
            s->chan[i].ctrl = value;
            break;
        case 4: /* Configuration */
            s->chan[i].conf = value;
            pl080_run(s);
            break;
        }
        goto update;
    }
    switch (offset >> 2) {
    case 2: /* IntTCClear */
        s->tc_int &= ~value;
        break;
    case 4: /* IntErrorClear */
        s->err_int &= ~value;
        break;
    case 8: /* SoftBReq */
    case 9: /* SoftSReq */
    case 10: /* SoftLBReq */
    case 11: /* SoftLSReq */
        /* ??? Implement these.  */
        cpu_abort(cpu_single_env, "pl080_write: Soft DMA not implemented\n");
        break;
    case 12: /* Configuration */
        s->conf = value;
        if (s->conf & (PL080_CONF_M1 | PL080_CONF_M2)) {
            cpu_abort(cpu_single_env,
                      "pl080_write: Big-endian DMA not implemented\n");
        }
        pl080_run(s);
        break;
    case 13: /* Sync */
        s->sync = value;
        break;
    default:
    bad_offset:
        cpu_abort(cpu_single_env, "pl080_write: Bad offset %x\n", (int)offset);
    }
update:
    pl080_update(s);
}

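/* Guest-side sketch: how channel 0 might be programmed for a small
   memory-to-memory copy, based purely on the register decoding above.
   writel() and DMA_BASE are stand-ins for the guest's MMIO accessor and the
   board-specific base address; this is illustrative and not part of the
   device model.  */
#if 0
static void example_m2m_copy(uint32_t src, uint32_t dest)
{
    uint32_t chan = DMA_BASE + 0x100;            /* channel 0 register block */

    writel(chan + 0x00, src);                    /* SrcAddr */
    writel(chan + 0x04, dest);                   /* DestAddr */
    writel(chan + 0x08, 0);                      /* LLI: no scatter-gather */
    writel(chan + 0x0c, 16                       /* transfer size */
                        | (2 << 18) | (2 << 21)  /* 32-bit source/dest width */
                        | PL080_CCTRL_SI | PL080_CCTRL_DI
                        | PL080_CCTRL_I);        /* Control */
    writel(chan + 0x10, PL080_CCONF_ITC          /* unmask TC interrupt */
                        | PL080_CCONF_E);        /* Configuration: enable */
}
#endif
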
static CPUReadMemoryFunc *pl080_readfn[] = {
   pl080_read,
   pl080_read,
   pl080_read
};

static CPUWriteMemoryFunc *pl080_writefn[] = {
   pl080_write,
   pl080_write,
   pl080_write
};

/* The PL080 and PL081 are the same except for the number of channels
   they implement (8 and 2 respectively).  */
void *pl080_init(uint32_t base, qemu_irq irq, int nchannels)
{
    int iomemtype;
    pl080_state *s;

    s = (pl080_state *)qemu_mallocz(sizeof(pl080_state));
    iomemtype = cpu_register_io_memory(0, pl080_readfn,
                                       pl080_writefn, s);
    cpu_register_physical_memory(base, 0x00001000, iomemtype);
    s->base = base;
    s->irq = irq;
    s->nchannels = nchannels;
    /* ??? Save/restore.  */
    return s;
}
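
/* Usage sketch (not part of this file): a board model creates the controller
   by mapping it at a base address and wiring one interrupt line.  The address
   and IRQ index below are placeholders rather than values from any particular
   board; pass 2 as the channel count to get a PL081 instead.  */
#if 0
static void example_board_init(qemu_irq *pic)
{
    void *dma = pl080_init(0x10130000, pic[17], 8);   /* PL080: 8 channels */
    (void)dma;
}
#endif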