Statistics
| Branch: | Revision:

root / hw / dma / pl080.c @ 49ab747f

History | View | Annotate | Download (11.7 kB)

1
/*
2
 * Arm PrimeCell PL080/PL081 DMA controller
3
 *
4
 * Copyright (c) 2006 CodeSourcery.
5
 * Written by Paul Brook
6
 *
7
 * This code is licensed under the GPL.
8
 */
9

    
10
#include "hw/sysbus.h"
11

    
12
#define PL080_MAX_CHANNELS 8

/* Global configuration register (offset 0x30) bits. */
#define PL080_CONF_E    0x1     /* Controller enable */
#define PL080_CONF_M1   0x2     /* AHB master 1 endianness (big-endian unimplemented) */
#define PL080_CONF_M2   0x4     /* AHB master 2 endianness (big-endian unimplemented) */

/* Per-channel configuration register bits. */
#define PL080_CCONF_H   0x40000 /* Halt: ignore further DMA requests */
#define PL080_CCONF_A   0x20000 /* Active (FIFO not empty) */
#define PL080_CCONF_L   0x10000 /* Lock */
#define PL080_CCONF_ITC 0x08000 /* Terminal count interrupt enable */
#define PL080_CCONF_IE  0x04000 /* Error interrupt enable */
#define PL080_CCONF_E   0x00001 /* Channel enable */

/* Per-channel control register bits. */
#define PL080_CCTRL_I   0x80000000 /* Terminal count interrupt on completion */
#define PL080_CCTRL_DI  0x08000000 /* Destination address increment */
#define PL080_CCTRL_SI  0x04000000 /* Source address increment */
#define PL080_CCTRL_D   0x02000000 /* Destination AHB master select */
#define PL080_CCTRL_S   0x01000000 /* Source AHB master select */
29

    
30
/* Per-channel register state, mirroring one PL080 channel register block. */
typedef struct {
    uint32_t src;   /* Channel source address */
    uint32_t dest;  /* Channel destination address */
    uint32_t lli;   /* Linked list item pointer (next descriptor; 0 = none) */
    uint32_t ctrl;  /* Channel control: widths, increment flags, transfer count */
    uint32_t conf;  /* Channel configuration: enable, flow control, IRQ enables */
} pl080_channel;
37

    
38
/* Device state for the PL080 (8-channel) / PL081 (2-channel) DMA controller. */
typedef struct {
    SysBusDevice busdev;
    MemoryRegion iomem;      /* 4K register window */
    uint8_t tc_int;          /* Raw terminal-count interrupt status, one bit per channel */
    uint8_t tc_mask;         /* TC interrupt enables, rebuilt from per-channel CCONF_ITC */
    uint8_t err_int;         /* Raw error interrupt status, one bit per channel */
    uint8_t err_mask;        /* Error interrupt enables, from per-channel CCONF_IE */
    uint32_t conf;           /* Global configuration register */
    uint32_t sync;           /* Synchronization register (stored, not acted on) */
    uint32_t req_single;     /* Pending single-transfer requests, one bit per peripheral id */
    uint32_t req_burst;      /* Pending burst-transfer requests, one bit per peripheral id */
    pl080_channel chan[PL080_MAX_CHANNELS];
    int nchannels;           /* 8 for PL080, 2 for PL081 */
    /* Flag to avoid recursive DMA invocations.  */
    int running;
    qemu_irq irq;            /* Combined TC/error interrupt output */
} pl080_state;
55

    
56
/* Migration state for a single DMA channel.  Field order is part of the
   migration wire format; do not reorder without bumping version_id. */
static const VMStateDescription vmstate_pl080_channel = {
    .name = "pl080_channel",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(src, pl080_channel),
        VMSTATE_UINT32(dest, pl080_channel),
        VMSTATE_UINT32(lli, pl080_channel),
        VMSTATE_UINT32(ctrl, pl080_channel),
        VMSTATE_UINT32(conf, pl080_channel),
        VMSTATE_END_OF_LIST()
    }
};
69

    
70
static const VMStateDescription vmstate_pl080 = {
71
    .name = "pl080",
72
    .version_id = 1,
73
    .minimum_version_id = 1,
74
    .fields = (VMStateField[]) {
75
        VMSTATE_UINT8(tc_int, pl080_state),
76
        VMSTATE_UINT8(tc_mask, pl080_state),
77
        VMSTATE_UINT8(err_int, pl080_state),
78
        VMSTATE_UINT8(err_mask, pl080_state),
79
        VMSTATE_UINT32(conf, pl080_state),
80
        VMSTATE_UINT32(sync, pl080_state),
81
        VMSTATE_UINT32(req_single, pl080_state),
82
        VMSTATE_UINT32(req_burst, pl080_state),
83
        VMSTATE_UINT8(tc_int, pl080_state),
84
        VMSTATE_UINT8(tc_int, pl080_state),
85
        VMSTATE_UINT8(tc_int, pl080_state),
86
        VMSTATE_STRUCT_ARRAY(chan, pl080_state, PL080_MAX_CHANNELS,
87
                             1, vmstate_pl080_channel, pl080_channel),
88
        VMSTATE_INT32(running, pl080_state),
89
        VMSTATE_END_OF_LIST()
90
    }
91
};
92

    
93
/* AMBA peripheral/PrimeCell identification bytes, returned one byte per
   word for reads at 0xfe0..0xfff.  Only the part-number low byte differs
   between PL080 (0x80) and PL081 (0x81). */
static const unsigned char pl080_id[] =
{ 0x80, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };

static const unsigned char pl081_id[] =
{ 0x81, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };
98

    
99
/* Recompute the combined interrupt line: asserted while any unmasked
   terminal-count or error interrupt is pending. */
static void pl080_update(pl080_state *s)
{
    uint32_t pending;

    pending = (s->tc_int & s->tc_mask) | (s->err_int & s->err_mask);
    if (pending) {
        qemu_irq_raise(s->irq);
    } else {
        qemu_irq_lower(s->irq);
    }
}
107

    
108
/* Attempt to run DMA transfers for all enabled, unhalted channels.
   Called whenever a guest write may have started or resumed DMA. */
static void pl080_run(pl080_state *s)
{
    int c;
    int flow;
    pl080_channel *ch;
    int swidth;
    int dwidth;
    int xsize;
    int n;
    int src_id;
    int dest_id;
    int size;
    uint8_t buff[4];
    uint32_t req;

    /* Rebuild interrupt enable masks from the per-channel configuration.
       NOTE(review): tc_mask is cleared first but err_mask is not, so
       err_mask bits only ever accumulate — looks like a bug; confirm. */
    s->tc_mask = 0;
    for (c = 0; c < s->nchannels; c++) {
        if (s->chan[c].conf & PL080_CCONF_ITC)
            s->tc_mask |= 1 << c;
        if (s->chan[c].conf & PL080_CCONF_IE)
            s->err_mask |= 1 << c;
    }

    /* Nothing to do while the controller is globally disabled. */
    if ((s->conf & PL080_CONF_E) == 0)
        return;

/* Aborts as soon as the controller is enabled: the transfer engine below
   has never been validated, so everything past this point is effectively
   dead code until this hw_error is removed. */
hw_error("DMA active\n");
    /* If we are already in the middle of a DMA operation then indicate that
       there may be new DMA requests and return immediately.  */
    if (s->running) {
        s->running++;
        return;
    }
    s->running = 1;
    while (s->running) {
        for (c = 0; c < s->nchannels; c++) {
            ch = &s->chan[c];
again:
            /* Test if this channel has any pending DMA requests: it must
               be enabled (E set) and not halted (H clear).  */
            if ((ch->conf & (PL080_CCONF_H | PL080_CCONF_E))
                    != PL080_CCONF_E)
                continue;
            /* Flow control / transfer type field of the config register. */
            flow = (ch->conf >> 11) & 7;
            if (flow >= 4) {
                hw_error(
                    "pl080_run: Peripheral flow control not implemented\n");
            }
            src_id = (ch->conf >> 1) & 0x1f;
            dest_id = (ch->conf >> 6) & 0x1f;
            /* Remaining transfer count lives in the low 12 bits of ctrl. */
            size = ch->ctrl & 0xfff;
            req = s->req_single | s->req_burst;
            switch (flow) {
            case 0: /* memory-to-memory: always ready */
                break;
            case 1: /* memory-to-peripheral: wait for destination request */
                if ((req & (1u << dest_id)) == 0)
                    size = 0;
                break;
            case 2: /* peripheral-to-memory: wait for source request */
                if ((req & (1u << src_id)) == 0)
                    size = 0;
                break;
            case 3: /* peripheral-to-peripheral: need both requests */
                if ((req & (1u << src_id)) == 0
                        || (req & (1u << dest_id)) == 0)
                    size = 0;
                break;
            }
            if (!size)
                continue;

            /* Transfer one element.  */
            /* ??? Should transfer multiple elements for a burst request.  */
            /* ??? Unclear what the proper behavior is when source and
               destination widths are different.  */
            swidth = 1 << ((ch->ctrl >> 18) & 7);
            dwidth = 1 << ((ch->ctrl >> 21) & 7);
            /* Gather source beats into buff until one destination beat
               is assembled. */
            for (n = 0; n < dwidth; n+= swidth) {
                cpu_physical_memory_read(ch->src, buff + n, swidth);
                if (ch->ctrl & PL080_CCTRL_SI)
                    ch->src += swidth;
            }
            xsize = (dwidth < swidth) ? swidth : dwidth;
            /* ??? This may pad the value incorrectly for dwidth < 32.  */
            for (n = 0; n < xsize; n += dwidth) {
                cpu_physical_memory_write(ch->dest + n, buff + n, dwidth);
                if (ch->ctrl & PL080_CCTRL_DI)
                    /* NOTE(review): increments by the *source* width even
                       though this loop writes dwidth bytes — confirm. */
                    ch->dest += swidth;
            }

            size--;
            ch->ctrl = (ch->ctrl & 0xfffff000) | size;
            if (size == 0) {
                /* Transfer complete.  */
                if (ch->lli) {
                    /* Follow the linked list: descriptor layout is
                       src, dest, lli, ctrl as little-endian words. */
                    ch->src = ldl_le_phys(ch->lli);
                    ch->dest = ldl_le_phys(ch->lli + 4);
                    ch->ctrl = ldl_le_phys(ch->lli + 12);
                    ch->lli = ldl_le_phys(ch->lli + 8);
                } else {
                    /* No more descriptors: disable the channel. */
                    ch->conf &= ~PL080_CCONF_E;
                }
                if (ch->ctrl & PL080_CCTRL_I) {
                    s->tc_int |= 1 << c;
                }
            }
            goto again;
        }
        /* running was bumped by re-entrant calls; if new requests arrived
           while we were transferring, make another pass.  */
        if (--s->running)
            s->running = 1;
    }
}
220

    
221
/* MMIO read handler for the 4K PL080/PL081 register window.
   Returns the register value, or 0 (with a guest-error log) for
   unimplemented/bad offsets. */
static uint64_t pl080_read(void *opaque, hwaddr offset,
                           unsigned size)
{
    pl080_state *s = (pl080_state *)opaque;
    uint32_t i;
    uint32_t mask;

    if (offset >= 0xfe0 && offset < 0x1000) {
        /* AMBA identification registers: one ID byte per word. */
        if (s->nchannels == 8) {
            return pl080_id[(offset - 0xfe0) >> 2];
        } else {
            return pl081_id[(offset - 0xfe0) >> 2];
        }
    }
    if (offset >= 0x100 && offset < 0x200) {
        /* Per-channel registers: 0x20 bytes of address space per channel. */
        i = (offset & 0xe0) >> 5;
        if (i >= s->nchannels)
            goto bad_offset;
        /* Mask down to the register index within the channel block.
           Without the & 7 the switch operand is >= 0x40 here and no
           case could ever match, making channel registers unreadable. */
        switch ((offset >> 2) & 7) {
        case 0: /* SrcAddr */
            return s->chan[i].src;
        case 1: /* DestAddr */
            return s->chan[i].dest;
        case 2: /* LLI */
            return s->chan[i].lli;
        case 3: /* Control */
            return s->chan[i].ctrl;
        case 4: /* Configuration */
            return s->chan[i].conf;
        default:
            goto bad_offset;
        }
    }
    switch (offset >> 2) {
    case 0: /* IntStatus */
        return (s->tc_int & s->tc_mask) | (s->err_int & s->err_mask);
    case 1: /* IntTCStatus */
        return (s->tc_int & s->tc_mask);
    case 3: /* IntErrorStatus */
        return (s->err_int & s->err_mask);
    case 5: /* RawIntTCStatus */
        return s->tc_int;
    case 6: /* RawIntErrorStatus */
        return s->err_int;
    case 7: /* EnbldChns */
        /* Bitmask of currently enabled channels. */
        mask = 0;
        for (i = 0; i < s->nchannels; i++) {
            if (s->chan[i].conf & PL080_CCONF_E)
                mask |= 1 << i;
        }
        return mask;
    case 8: /* SoftBReq */
    case 9: /* SoftSReq */
    case 10: /* SoftLBReq */
    case 11: /* SoftLSReq */
        /* ??? Implement these. */
        return 0;
    case 12: /* Configuration */
        return s->conf;
    case 13: /* Sync */
        return s->sync;
    default:
    bad_offset:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "pl080_read: Bad offset %x\n", (int)offset);
        return 0;
    }
}
289

    
290
static void pl080_write(void *opaque, hwaddr offset,
291
                        uint64_t value, unsigned size)
292
{
293
    pl080_state *s = (pl080_state *)opaque;
294
    int i;
295

    
296
    if (offset >= 0x100 && offset < 0x200) {
297
        i = (offset & 0xe0) >> 5;
298
        if (i >= s->nchannels)
299
            goto bad_offset;
300
        switch (offset >> 2) {
301
        case 0: /* SrcAddr */
302
            s->chan[i].src = value;
303
            break;
304
        case 1: /* DestAddr */
305
            s->chan[i].dest = value;
306
            break;
307
        case 2: /* LLI */
308
            s->chan[i].lli = value;
309
            break;
310
        case 3: /* Control */
311
            s->chan[i].ctrl = value;
312
            break;
313
        case 4: /* Configuration */
314
            s->chan[i].conf = value;
315
            pl080_run(s);
316
            break;
317
        }
318
    }
319
    switch (offset >> 2) {
320
    case 2: /* IntTCClear */
321
        s->tc_int &= ~value;
322
        break;
323
    case 4: /* IntErrorClear */
324
        s->err_int &= ~value;
325
        break;
326
    case 8: /* SoftBReq */
327
    case 9: /* SoftSReq */
328
    case 10: /* SoftLBReq */
329
    case 11: /* SoftLSReq */
330
        /* ??? Implement these.  */
331
        qemu_log_mask(LOG_UNIMP, "pl080_write: Soft DMA not implemented\n");
332
        break;
333
    case 12: /* Configuration */
334
        s->conf = value;
335
        if (s->conf & (PL080_CONF_M1 | PL080_CONF_M1)) {
336
            qemu_log_mask(LOG_UNIMP,
337
                          "pl080_write: Big-endian DMA not implemented\n");
338
        }
339
        pl080_run(s);
340
        break;
341
    case 13: /* Sync */
342
        s->sync = value;
343
        break;
344
    default:
345
    bad_offset:
346
        qemu_log_mask(LOG_GUEST_ERROR,
347
                      "pl080_write: Bad offset %x\n", (int)offset);
348
    }
349
    pl080_update(s);
350
}
351

    
352
/* MMIO dispatch table for the register window. */
static const MemoryRegionOps pl080_ops = {
    .read = pl080_read,
    .write = pl080_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
357

    
358
/* Shared instance init for both variants: record the channel count, map
   the 4K register bank and export the combined interrupt line.
   Always succeeds (returns 0). */
static int pl08x_init(SysBusDevice *dev, int nchannels)
{
    pl080_state *s = FROM_SYSBUS(pl080_state, dev);

    s->nchannels = nchannels;
    memory_region_init_io(&s->iomem, &pl080_ops, s, "pl080", 0x1000);
    sysbus_init_mmio(dev, &s->iomem);
    sysbus_init_irq(dev, &s->irq);
    return 0;
}
368

    
369
/* Instance init for the full PL080: all eight channels. */
static int pl080_init(SysBusDevice *dev)
{
    return pl08x_init(dev, PL080_MAX_CHANNELS);
}
373

    
374
/* Instance init for the reduced PL081: only two channels. */
static int pl081_init(SysBusDevice *dev)
{
    enum { PL081_NUM_CHANNELS = 2 };

    return pl08x_init(dev, PL081_NUM_CHANNELS);
}
378

    
379
/* Class init for the 8-channel PL080 device model. */
static void pl080_class_init(ObjectClass *klass, void *data)
{
    SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_pl080;
    dc->no_user = 1;
    sbc->init = pl080_init;
}
388

    
389
/* QOM type registration record for the PL080. */
static const TypeInfo pl080_info = {
    .name          = "pl080",
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(pl080_state),
    .class_init    = pl080_class_init,
};
395

    
396
/* Class init for the 2-channel PL081 device model; shares migration
   state with the PL080. */
static void pl081_class_init(ObjectClass *klass, void *data)
{
    SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_pl080;
    dc->no_user = 1;
    sbc->init = pl081_init;
}
405

    
406
/* QOM type registration record for the PL081. */
static const TypeInfo pl081_info = {
    .name          = "pl081",
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(pl080_state),
    .class_init    = pl081_class_init,
};
412

    
413
/* The PL080 and PL081 are the same except for the number of channels
414
   they implement (8 and 2 respectively).  */
415
static void pl080_register_types(void)
416
{
417
    type_register_static(&pl080_info);
418
    type_register_static(&pl081_info);
419
}
420

    
421
type_init(pl080_register_types)