/* Source: root/hw/pl080.c @ revision 75b0646f (11.2 kB) */
/*
 * Arm PrimeCell PL080/PL081 DMA controller
 *
 * Copyright (c) 2006 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licenced under the GPL.
 */
9

    
10
#include "sysbus.h"

/* The PL080 has 8 DMA channels; the PL081 variant has 2.  */
#define PL080_MAX_CHANNELS 8

/* DMACConfiguration (global) register bits.  */
#define PL080_CONF_E    0x1   /* Controller enable */
#define PL080_CONF_M1   0x2   /* AHB master 1 endianness (big-endian if set) */
#define PL080_CONF_M2   0x4   /* AHB master 2 endianness (big-endian if set) */

/* DMACCxConfiguration (per-channel) register bits.  */
#define PL080_CCONF_H   0x40000 /* Halt: ignore further DMA requests */
#define PL080_CCONF_A   0x20000 /* Active (not used by this model) */
#define PL080_CCONF_L   0x10000 /* Lock (not used by this model) */
#define PL080_CCONF_ITC 0x08000 /* Terminal count interrupt enable */
#define PL080_CCONF_IE  0x04000 /* Error interrupt enable */
#define PL080_CCONF_E   0x00001 /* Channel enable */

/* DMACCxControl (per-channel) register bits.  */
#define PL080_CCTRL_I   0x80000000 /* Raise terminal count interrupt */
#define PL080_CCTRL_DI  0x08000000 /* Destination address increment */
#define PL080_CCTRL_SI  0x04000000 /* Source address increment */
#define PL080_CCTRL_D   0x02000000 /* Destination AHB master select (unused here) */
#define PL080_CCTRL_S   0x01000000 /* Source AHB master select (unused here) */
29

    
30
/* Guest-visible state of a single DMA channel: the four channel
   registers plus the channel configuration register.  */
typedef struct {
    uint32_t src;   /* DMACCxSrcAddr: current source address */
    uint32_t dest;  /* DMACCxDestAddr: current destination address */
    uint32_t lli;   /* DMACCxLLI: next linked-list item (0 = end of list) */
    uint32_t ctrl;  /* DMACCxControl: size [11:0], widths, I/SI/DI bits */
    uint32_t conf;  /* DMACCxConfiguration: enable, flow control, request ids */
} pl080_channel;
37

    
38
/* Device state for the PL080/PL081 DMA controller model.  */
typedef struct {
    SysBusDevice busdev;
    uint8_t tc_int;      /* per-channel terminal count interrupt status */
    uint8_t tc_mask;     /* per-channel terminal count interrupt enables */
    uint8_t err_int;     /* per-channel error interrupt status */
    uint8_t err_mask;    /* per-channel error interrupt enables */
    uint32_t conf;       /* DMACConfiguration register */
    uint32_t sync;       /* DMACSync register */
    uint32_t req_single; /* pending single requests, one bit per request id */
    uint32_t req_burst;  /* pending burst requests, one bit per request id */
    pl080_channel chan[PL080_MAX_CHANNELS];
    int nchannels;       /* 8 for PL080, 2 for PL081 */
    /* Flag to avoid recursive DMA invocations.  */
    int running;
    qemu_irq irq;        /* combined interrupt output */
} pl080_state;
54

    
55
/* Migration state for one DMA channel (all five channel registers).  */
static const VMStateDescription vmstate_pl080_channel = {
    .name = "pl080_channel",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(src, pl080_channel),
        VMSTATE_UINT32(dest, pl080_channel),
        VMSTATE_UINT32(lli, pl080_channel),
        VMSTATE_UINT32(ctrl, pl080_channel),
        VMSTATE_UINT32(conf, pl080_channel),
        VMSTATE_END_OF_LIST()
    }
};
68

    
69
/* Migration state for the whole controller.  */
static const VMStateDescription vmstate_pl080 = {
    .name = "pl080",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(tc_int, pl080_state),
        VMSTATE_UINT8(tc_mask, pl080_state),
        VMSTATE_UINT8(err_int, pl080_state),
        VMSTATE_UINT8(err_mask, pl080_state),
        VMSTATE_UINT32(conf, pl080_state),
        VMSTATE_UINT32(sync, pl080_state),
        VMSTATE_UINT32(req_single, pl080_state),
        VMSTATE_UINT32(req_burst, pl080_state),
        /* NOTE(review): tc_int is serialized three additional times
           below.  This looks like a copy-and-paste error, but the
           duplicates are part of the on-the-wire format now; removing
           them would break migration from existing streams unless
           version_id is bumped.  Left byte-identical on purpose.  */
        VMSTATE_UINT8(tc_int, pl080_state),
        VMSTATE_UINT8(tc_int, pl080_state),
        VMSTATE_UINT8(tc_int, pl080_state),
        VMSTATE_STRUCT_ARRAY(chan, pl080_state, PL080_MAX_CHANNELS,
                             1, vmstate_pl080_channel, pl080_channel),
        VMSTATE_INT32(running, pl080_state),
        VMSTATE_END_OF_LIST()
    }
};
91

    
92
/* PrimeCell peripheral/cell ID bytes, read back one byte per word at
   offsets 0xfe0..0xffc; the two tables differ only in the part number
   byte (0x80 vs 0x81).  */
static const unsigned char pl080_id[] =
{ 0x80, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };

static const unsigned char pl081_id[] =
{ 0x81, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };
97

    
98
/* Recompute the combined interrupt line from the masked terminal
   count and error interrupt status.  */
static void pl080_update(pl080_state *s)
{
    int pending = (s->tc_int & s->tc_mask) | (s->err_int & s->err_mask);

    if (pending) {
        qemu_irq_raise(s->irq);
    } else {
        qemu_irq_lower(s->irq);
    }
}
106

    
107
/* Recompute interrupt masks and perform any runnable DMA transfers.
 *
 * Called after the guest writes a channel configuration or the global
 * configuration register.  The mask recomputation runs even when the
 * controller is disabled; actual data movement is not implemented in
 * this model (see the hw_error below).
 */
static void pl080_run(pl080_state *s)
{
    int c;
    int flow;
    pl080_channel *ch;
    int swidth;
    int dwidth;
    int xsize;
    int n;
    int src_id;
    int dest_id;
    int size;
    uint8_t buff[4];
    uint32_t req;

    /* Rebuild both interrupt-enable masks from the per-channel ITC/IE
       bits.  err_mask must be cleared first, exactly like tc_mask:
       previously it was only ever OR-ed into, so disabling a channel's
       IE bit never took effect.  */
    s->tc_mask = 0;
    s->err_mask = 0;
    for (c = 0; c < s->nchannels; c++) {
        if (s->chan[c].conf & PL080_CCONF_ITC)
            s->tc_mask |= 1 << c;
        if (s->chan[c].conf & PL080_CCONF_IE)
            s->err_mask |= 1 << c;
    }

    if ((s->conf & PL080_CONF_E) == 0)
        return;

/* Deliberate placeholder: DMA transfers are not implemented, so abort
   if the guest actually enables the controller.  Everything below is
   currently unreachable and kept for reference.  */
hw_error("DMA active\n");
    /* If we are already in the middle of a DMA operation then indicate that
       there may be new DMA requests and return immediately.  */
    if (s->running) {
        s->running++;
        return;
    }
    s->running = 1;
    while (s->running) {
        for (c = 0; c < s->nchannels; c++) {
            ch = &s->chan[c];
again:
            /* Test if this channel has any pending DMA requests.  */
            if ((ch->conf & (PL080_CCONF_H | PL080_CCONF_E))
                    != PL080_CCONF_E)
                continue;
            flow = (ch->conf >> 11) & 7;
            if (flow >= 4) {
                hw_error(
                    "pl080_run: Peripheral flow control not implemented\n");
            }
            src_id = (ch->conf >> 1) & 0x1f;
            dest_id = (ch->conf >> 6) & 0x1f;
            size = ch->ctrl & 0xfff;
            req = s->req_single | s->req_burst;
            switch (flow) {
            case 0: /* Memory to memory: always ready.  */
                break;
            case 1: /* Memory to peripheral: need a destination request.  */
                if ((req & (1u << dest_id)) == 0)
                    size = 0;
                break;
            case 2: /* Peripheral to memory: need a source request.  */
                if ((req & (1u << src_id)) == 0)
                    size = 0;
                break;
            case 3: /* Peripheral to peripheral: need both requests.  */
                if ((req & (1u << src_id)) == 0
                        || (req & (1u << dest_id)) == 0)
                    size = 0;
                break;
            }
            if (!size)
                continue;

            /* Transfer one element.  */
            /* ??? Should transfer multiple elements for a burst request.  */
            /* ??? Unclear what the proper behavior is when source and
               destination widths are different.  */
            swidth = 1 << ((ch->ctrl >> 18) & 7);
            dwidth = 1 << ((ch->ctrl >> 21) & 7);
            for (n = 0; n < dwidth; n+= swidth) {
                cpu_physical_memory_read(ch->src, buff + n, swidth);
                if (ch->ctrl & PL080_CCTRL_SI)
                    ch->src += swidth;
            }
            xsize = (dwidth < swidth) ? swidth : dwidth;
            /* ??? This may pad the value incorrectly for dwidth < 32.  */
            for (n = 0; n < xsize; n += dwidth) {
                cpu_physical_memory_write(ch->dest + n, buff + n, dwidth);
                if (ch->ctrl & PL080_CCTRL_DI)
                    /* NOTE(review): incrementing by swidth looks wrong
                       when swidth != dwidth (dwidth seems intended);
                       unreachable today, so left as-is — confirm
                       against the PL080 TRM before enabling.  */
                    ch->dest += swidth;
            }

            size--;
            ch->ctrl = (ch->ctrl & 0xfffff000) | size;
            if (size == 0) {
                /* Transfer complete: chain to the next linked-list
                   item, or disable the channel if the list is done.  */
                if (ch->lli) {
                    ch->src = ldl_le_phys(ch->lli);
                    ch->dest = ldl_le_phys(ch->lli + 4);
                    ch->ctrl = ldl_le_phys(ch->lli + 12);
                    ch->lli = ldl_le_phys(ch->lli + 8);
                } else {
                    ch->conf &= ~PL080_CCONF_E;
                }
                if (ch->ctrl & PL080_CCTRL_I) {
                    s->tc_int |= 1 << c;
                }
            }
            goto again;
        }
        /* A nested pl080_run call bumped running above 1: go round
           again to pick up the new requests.  */
        if (--s->running)
            s->running = 1;
    }
}
219

    
220
/* Read a device register.
 *
 * 0xfe0-0xfff: PrimeCell ID bytes; 0x100-0x1ff: per-channel registers
 * (0x20 bytes per channel); low offsets: global status registers.
 * Unknown offsets report a fatal bad-offset error.
 */
static uint32_t pl080_read(void *opaque, target_phys_addr_t offset)
{
    pl080_state *s = (pl080_state *)opaque;
    uint32_t i;
    uint32_t mask;

    if (offset >= 0xfe0 && offset < 0x1000) {
        /* ID registers distinguish the 8-channel PL080 from the
           2-channel PL081.  */
        if (s->nchannels == 8) {
            return pl080_id[(offset - 0xfe0) >> 2];
        } else {
            return pl081_id[(offset - 0xfe0) >> 2];
        }
    }
    if (offset >= 0x100 && offset < 0x200) {
        i = (offset & 0xe0) >> 5;
        if (i >= s->nchannels)
            goto bad_offset;
        /* Index of the register within the channel.  The "& 7" is
           required: channel offsets are >= 0x100, so without it
           (offset >> 2) is >= 0x40 and every channel read fell
           through to bad_offset.  */
        switch ((offset >> 2) & 7) {
        case 0: /* SrcAddr */
            return s->chan[i].src;
        case 1: /* DestAddr */
            return s->chan[i].dest;
        case 2: /* LLI */
            return s->chan[i].lli;
        case 3: /* Control */
            return s->chan[i].ctrl;
        case 4: /* Configuration */
            return s->chan[i].conf;
        default:
            goto bad_offset;
        }
    }
    switch (offset >> 2) {
    case 0: /* IntStatus */
        return (s->tc_int & s->tc_mask) | (s->err_int & s->err_mask);
    case 1: /* IntTCStatus */
        return (s->tc_int & s->tc_mask);
    case 3: /* IntErrorStatus */
        return (s->err_int & s->err_mask);
    case 5: /* RawIntTCStatus */
        return s->tc_int;
    case 6: /* RawIntErrorStatus */
        return s->err_int;
    case 7: /* EnbldChns */
        /* One bit per currently-enabled channel.  */
        mask = 0;
        for (i = 0; i < s->nchannels; i++) {
            if (s->chan[i].conf & PL080_CCONF_E)
                mask |= 1 << i;
        }
        return mask;
    case 8: /* SoftBReq */
    case 9: /* SoftSReq */
    case 10: /* SoftLBReq */
    case 11: /* SoftLSReq */
        /* ??? Implement these. */
        return 0;
    case 12: /* Configuration */
        return s->conf;
    case 13: /* Sync */
        return s->sync;
    default:
    bad_offset:
        hw_error("pl080_read: Bad offset %x\n", (int)offset);
        return 0;
    }
}
286

    
287
static void pl080_write(void *opaque, target_phys_addr_t offset,
288
                          uint32_t value)
289
{
290
    pl080_state *s = (pl080_state *)opaque;
291
    int i;
292

    
293
    if (offset >= 0x100 && offset < 0x200) {
294
        i = (offset & 0xe0) >> 5;
295
        if (i >= s->nchannels)
296
            goto bad_offset;
297
        switch (offset >> 2) {
298
        case 0: /* SrcAddr */
299
            s->chan[i].src = value;
300
            break;
301
        case 1: /* DestAddr */
302
            s->chan[i].dest = value;
303
            break;
304
        case 2: /* LLI */
305
            s->chan[i].lli = value;
306
            break;
307
        case 3: /* Control */
308
            s->chan[i].ctrl = value;
309
            break;
310
        case 4: /* Configuration */
311
            s->chan[i].conf = value;
312
            pl080_run(s);
313
            break;
314
        }
315
    }
316
    switch (offset >> 2) {
317
    case 2: /* IntTCClear */
318
        s->tc_int &= ~value;
319
        break;
320
    case 4: /* IntErrorClear */
321
        s->err_int &= ~value;
322
        break;
323
    case 8: /* SoftBReq */
324
    case 9: /* SoftSReq */
325
    case 10: /* SoftLBReq */
326
    case 11: /* SoftLSReq */
327
        /* ??? Implement these.  */
328
        hw_error("pl080_write: Soft DMA not implemented\n");
329
        break;
330
    case 12: /* Configuration */
331
        s->conf = value;
332
        if (s->conf & (PL080_CONF_M1 | PL080_CONF_M1)) {
333
            hw_error("pl080_write: Big-endian DMA not implemented\n");
334
        }
335
        pl080_run(s);
336
        break;
337
    case 13: /* Sync */
338
        s->sync = value;
339
        break;
340
    default:
341
    bad_offset:
342
        hw_error("pl080_write: Bad offset %x\n", (int)offset);
343
    }
344
    pl080_update(s);
345
}
346

    
347
/* MMIO dispatch tables: byte, halfword and word accesses are all
   handled by the same routine.  */
static CPUReadMemoryFunc * const pl080_readfn[] = {
   pl080_read,
   pl080_read,
   pl080_read
};

static CPUWriteMemoryFunc * const pl080_writefn[] = {
   pl080_write,
   pl080_write,
   pl080_write
};
358

    
359
/* Common init for both variants: register the 4K MMIO region and the
   interrupt line, then record the channel count.  */
static int pl08x_init(SysBusDevice *dev, int nchannels)
{
    pl080_state *s = FROM_SYSBUS(pl080_state, dev);
    int mmio;

    mmio = cpu_register_io_memory(pl080_readfn, pl080_writefn, s,
                                  DEVICE_NATIVE_ENDIAN);
    sysbus_init_mmio(dev, 0x1000, mmio);
    sysbus_init_irq(dev, &s->irq);
    s->nchannels = nchannels;
    return 0;
}
372

    
373
/* qdev init hook for the 8-channel PL080.  */
static int pl080_init(SysBusDevice *dev)
{
    return pl08x_init(dev, 8);
}
377

    
378
/* qdev init hook for the 2-channel PL081.  */
static int pl081_init(SysBusDevice *dev)
{
    return pl08x_init(dev, 2);
}
382

    
383
/* qdev device descriptions; both variants share the state struct and
   the migration description.  no_user: not creatable with -device.  */
static SysBusDeviceInfo pl080_info = {
    .init = pl080_init,
    .qdev.name = "pl080",
    .qdev.size = sizeof(pl080_state),
    .qdev.vmsd = &vmstate_pl080,
    .qdev.no_user = 1,
};

static SysBusDeviceInfo pl081_info = {
    .init = pl081_init,
    .qdev.name = "pl081",
    .qdev.size = sizeof(pl080_state),
    .qdev.vmsd = &vmstate_pl080,
    .qdev.no_user = 1,
};
398

    
399
/* The PL080 and PL081 are the same except for the number of channels
   they implement (8 and 2 respectively).  */
static void pl080_register_devices(void)
{
    sysbus_register_withprop(&pl080_info);
    sysbus_register_withprop(&pl081_info);
}

device_init(pl080_register_devices)