hw/dma/xilinx_axidma.c @ 49ab747f

/*
 * QEMU model of Xilinx AXI-DMA block.
 *
 * Copyright (c) 2011 Edgar E. Iglesias.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "hw/sysbus.h"
#include "qemu/timer.h"
#include "hw/ptimer.h"
#include "qemu/log.h"
#include "hw/qdev-addr.h"

#include "hw/stream.h"

#define D(x)
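
/*
 * Register offsets (in 32-bit words) within one channel's register window.
 * Each of the two channels (MM2S at 0x00, S2MM at 0x30) has its own
 * 0x30-byte block of registers; streamid_from_addr() below relies on
 * that stride to pick the channel.
 */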
#define R_DMACR             (0x00 / 4)
#define R_DMASR             (0x04 / 4)
#define R_CURDESC           (0x08 / 4)
#define R_TAILDESC          (0x10 / 4)
#define R_MAX               (0x30 / 4)

enum {
    DMACR_RUNSTOP = 1,
    DMACR_TAILPTR_MODE = 2,
    DMACR_RESET = 4
};

enum {
    DMASR_HALTED = 1,
    DMASR_IDLE  = 2,
    DMASR_IOC_IRQ  = 1 << 12,
    DMASR_DLY_IRQ  = 1 << 13,

    DMASR_IRQ_MASK = 7 << 12
};
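
/*
 * In-memory scatter-gather descriptor, as laid out by the guest driver.
 * Descriptors are read from and written back to guest memory in little
 * endian format by stream_desc_load()/stream_desc_store().
 */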
struct SDesc {
    uint64_t nxtdesc;
    uint64_t buffer_address;
    uint64_t reserved;
    uint32_t control;
    uint32_t status;
    uint32_t app[6];
};

enum {
    SDESC_CTRL_EOF = (1 << 26),
    SDESC_CTRL_SOF = (1 << 27),

    SDESC_CTRL_LEN_MASK = (1 << 23) - 1
};

enum {
    SDESC_STATUS_EOF = (1 << 26),
    SDESC_STATUS_SOF_BIT = 27,
    SDESC_STATUS_SOF = (1 << SDESC_STATUS_SOF_BIT),
    SDESC_STATUS_COMPLETE = (1 << 31)
};
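
/*
 * Per-channel state.  streams[0] models the MM2S (memory to stream) channel
 * and streams[1] the S2MM (stream to memory) channel; each has its own
 * register file, IRQ line and interrupt-coalescing timer.
 */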
struct Stream {
    QEMUBH *bh;
    ptimer_state *ptimer;
    qemu_irq irq;

    int nr;

    struct SDesc desc;
    int pos;
    unsigned int complete_cnt;
    uint32_t regs[R_MAX];
};

struct XilinxAXIDMA {
    SysBusDevice busdev;
    MemoryRegion iomem;
    uint32_t freqhz;
    StreamSlave *tx_dev;

    struct Stream streams[2];
};

/*
 * Helper calls to extract info from descriptors and other trivial
 * state from regs.
 */
static inline int stream_desc_sof(struct SDesc *d)
{
    return d->control & SDESC_CTRL_SOF;
}

static inline int stream_desc_eof(struct SDesc *d)
{
    return d->control & SDESC_CTRL_EOF;
}

static inline int stream_resetting(struct Stream *s)
{
    return !!(s->regs[R_DMACR] & DMACR_RESET);
}

static inline int stream_running(struct Stream *s)
{
    return s->regs[R_DMACR] & DMACR_RUNSTOP;
}

static inline int stream_halted(struct Stream *s)
{
    return s->regs[R_DMASR] & DMASR_HALTED;
}

static inline int stream_idle(struct Stream *s)
{
    return !!(s->regs[R_DMASR] & DMASR_IDLE);
}

static void stream_reset(struct Stream *s)
{
    s->regs[R_DMASR] = DMASR_HALTED;  /* starts up halted.  */
    s->regs[R_DMACR] = 1 << 16; /* Starts with one in compl threshold.  */
}

141
/* Map an offset addr into a channel index.  */
142
static inline int streamid_from_addr(hwaddr addr)
143
{
144
    int sid;
145

    
146
    sid = addr / (0x30);
147
    sid &= 1;
148
    return sid;
149
}
150

    
#ifdef DEBUG_ENET
static void stream_desc_show(struct SDesc *d)
{
    qemu_log("buffer_addr  = %" PRIx64 "\n", d->buffer_address);
    qemu_log("nxtdesc      = %" PRIx64 "\n", d->nxtdesc);
    qemu_log("control      = %x\n", d->control);
    qemu_log("status       = %x\n", d->status);
}
#endif

static void stream_desc_load(struct Stream *s, hwaddr addr)
{
    struct SDesc *d = &s->desc;
    int i;

    cpu_physical_memory_read(addr, (void *) d, sizeof *d);

    /* Convert from LE into host endianness.  */
    d->buffer_address = le64_to_cpu(d->buffer_address);
    d->nxtdesc = le64_to_cpu(d->nxtdesc);
    d->control = le32_to_cpu(d->control);
    d->status = le32_to_cpu(d->status);
    for (i = 0; i < ARRAY_SIZE(d->app); i++) {
        d->app[i] = le32_to_cpu(d->app[i]);
    }
}

static void stream_desc_store(struct Stream *s, hwaddr addr)
{
    struct SDesc *d = &s->desc;
    int i;

    /* Convert from host endianness into LE.  */
    d->buffer_address = cpu_to_le64(d->buffer_address);
    d->nxtdesc = cpu_to_le64(d->nxtdesc);
    d->control = cpu_to_le32(d->control);
    d->status = cpu_to_le32(d->status);
    for (i = 0; i < ARRAY_SIZE(d->app); i++) {
        d->app[i] = cpu_to_le32(d->app[i]);
    }
    cpu_physical_memory_write(addr, (void *) d, sizeof *d);
}

static void stream_update_irq(struct Stream *s)
{
    unsigned int pending, mask, irq;

    pending = s->regs[R_DMASR] & DMASR_IRQ_MASK;
    mask = s->regs[R_DMACR] & DMASR_IRQ_MASK;

    irq = pending & mask;

    qemu_set_irq(s->irq, !!irq);
}

static void stream_reload_complete_cnt(struct Stream *s)
{
    unsigned int comp_th;
    comp_th = (s->regs[R_DMACR] >> 16) & 0xff;
    s->complete_cnt = comp_th;
}

static void timer_hit(void *opaque)
{
    struct Stream *s = opaque;

    stream_reload_complete_cnt(s);
    s->regs[R_DMASR] |= DMASR_DLY_IRQ;
    stream_update_irq(s);
}
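
/*
 * Interrupt coalescing: each completed packet decrements complete_cnt
 * (reloaded from the IRQ threshold field in DMACR[23:16]); the IOC IRQ is
 * only raised once the threshold number of packets has completed.  A
 * non-zero delay field in DMACR[31:24] additionally arms the delay timer,
 * which raises the delayed IRQ if it expires first (see timer_hit()).
 */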
static void stream_complete(struct Stream *s)
{
    unsigned int comp_delay;

    /* Start the delayed timer.  */
    comp_delay = s->regs[R_DMACR] >> 24;
    if (comp_delay) {
        ptimer_stop(s->ptimer);
        ptimer_set_count(s->ptimer, comp_delay);
        ptimer_run(s->ptimer, 1);
    }

    s->complete_cnt--;
    if (s->complete_cnt == 0) {
        /* Raise the IOC irq.  */
        s->regs[R_DMASR] |= DMASR_IOC_IRQ;
        stream_reload_complete_cnt(s);
    }
}
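
/*
 * MM2S path: walk the descriptor ring starting at CURDESC, gathering each
 * descriptor's buffer into txbuf.  When a descriptor with EOF is reached,
 * the assembled packet is pushed to the connected stream slave.  Processing
 * stops at an already-completed descriptor or once TAILDESC is consumed.
 */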
static void stream_process_mem2s(struct Stream *s,
                                 StreamSlave *tx_dev)
{
    uint32_t prev_d;
    unsigned char txbuf[16 * 1024];
    unsigned int txlen;
    uint32_t app[6];

    if (!stream_running(s) || stream_idle(s)) {
        return;
    }

    while (1) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }

        if (stream_desc_sof(&s->desc)) {
            s->pos = 0;
            memcpy(app, s->desc.app, sizeof app);
        }

        txlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if ((txlen + s->pos) > sizeof txbuf) {
            hw_error("%s: too small internal txbuf! %d\n", __func__,
                     txlen + s->pos);
        }

        cpu_physical_memory_read(s->desc.buffer_address,
                                 txbuf + s->pos, txlen);
        s->pos += txlen;

        if (stream_desc_eof(&s->desc)) {
            stream_push(tx_dev, txbuf, s->pos, app);
            s->pos = 0;
            stream_complete(s);
        }

        /* Update the descriptor.  */
        s->desc.status = txlen | SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);

        /* Advance.  */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }
}
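
/*
 * S2MM path: scatter an incoming stream packet across the descriptor ring.
 * The first descriptor written gets SOF, the last one gets EOF together
 * with the app words supplied by the stream master.
 */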
static void stream_process_s2mem(struct Stream *s,
                                 unsigned char *buf, size_t len, uint32_t *app)
{
    uint32_t prev_d;
    unsigned int rxlen;
    int pos = 0;
    int sof = 1;

    if (!stream_running(s) || stream_idle(s)) {
        return;
    }

    while (len) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }

        rxlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if (rxlen > len) {
            /* It fits.  */
            rxlen = len;
        }

        cpu_physical_memory_write(s->desc.buffer_address, buf + pos, rxlen);
        len -= rxlen;
        pos += rxlen;

        /* Update the descriptor.  */
        if (!len) {
            int i;

            stream_complete(s);
            for (i = 0; i < 5; i++) {
                s->desc.app[i] = app[i];
            }
            s->desc.status |= SDESC_STATUS_EOF;
        }

        s->desc.status |= sof << SDESC_STATUS_SOF_BIT;
        s->desc.status |= SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);
        sof = 0;

        /* Advance.  */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }
}
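
/*
 * StreamSlave push handler: invoked by the connected stream master (e.g.
 * the AXI ethernet model) to hand a received packet to the S2MM channel
 * (streams[1]).
 */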
static void
axidma_push(StreamSlave *obj, unsigned char *buf, size_t len, uint32_t *app)
{
    struct XilinxAXIDMA *d = FROM_SYSBUS(typeof(*d), SYS_BUS_DEVICE(obj));
    struct Stream *s = &d->streams[1];

    if (!app) {
        hw_error("No stream app data!\n");
    }
    stream_process_s2mem(s, buf, len, app);
    stream_update_irq(s);
}
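
/*
 * MMIO handlers: the channel is selected from the address (0x30-byte
 * stride) and the offset within the channel window indexes the register.
 * DMACR's reset bit reads back as self-clearing, and DMASR reads mirror
 * the live IRQ threshold and delay counters in their upper bytes.
 */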
static uint64_t axidma_read(void *opaque, hwaddr addr,
                            unsigned size)
{
    struct XilinxAXIDMA *d = opaque;
    struct Stream *s;
    uint32_t r = 0;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
        case R_DMACR:
            /* Simulate one cycle's reset delay.  */
            s->regs[addr] &= ~DMACR_RESET;
            r = s->regs[addr];
            break;
        case R_DMASR:
            s->regs[addr] &= 0xffff;
            s->regs[addr] |= (s->complete_cnt & 0xff) << 16;
            s->regs[addr] |= (ptimer_get_count(s->ptimer) & 0xff) << 24;
            r = s->regs[addr];
            break;
        default:
            r = s->regs[addr];
            D(qemu_log("%s ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                           __func__, sid, addr * 4, r));
            break;
    }
    return r;
}

static void axidma_write(void *opaque, hwaddr addr,
                         uint64_t value, unsigned size)
{
    struct XilinxAXIDMA *d = opaque;
    struct Stream *s;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
        case R_DMACR:
            /* Tailptr mode is always on.  */
            value |= DMACR_TAILPTR_MODE;
            /* Remember our previous reset state.  */
            value |= (s->regs[addr] & DMACR_RESET);
            s->regs[addr] = value;

            if (value & DMACR_RESET) {
                stream_reset(s);
            }

            if ((value & 1) && !stream_resetting(s)) {
                /* Start processing.  */
                s->regs[R_DMASR] &= ~(DMASR_HALTED | DMASR_IDLE);
            }
            stream_reload_complete_cnt(s);
            break;

        case R_DMASR:
            /* Mask away the write-to-clear irq bits.  */
            value &= ~(value & DMASR_IRQ_MASK);
            s->regs[addr] = value;
            break;

        case R_TAILDESC:
            s->regs[addr] = value;
            s->regs[R_DMASR] &= ~DMASR_IDLE; /* Not idle.  */
            if (!sid) {
                stream_process_mem2s(s, d->tx_dev);
            }
            break;
        default:
            D(qemu_log("%s: ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                  __func__, sid, addr * 4, (unsigned)value));
            s->regs[addr] = value;
            break;
    }
    stream_update_irq(s);
}

static const MemoryRegionOps axidma_ops = {
    .read = axidma_read,
    .write = axidma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
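
/*
 * Device init: one sysbus IRQ per channel, a single MMIO region covering
 * both register windows (2 * 0x30 bytes), and a ptimer per channel used
 * for the delayed-interrupt timeout.
 */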
static int xilinx_axidma_init(SysBusDevice *dev)
{
    struct XilinxAXIDMA *s = FROM_SYSBUS(typeof(*s), dev);
    int i;

    sysbus_init_irq(dev, &s->streams[0].irq);
    sysbus_init_irq(dev, &s->streams[1].irq);

    memory_region_init_io(&s->iomem, &axidma_ops, s,
                          "xlnx.axi-dma", R_MAX * 4 * 2);
    sysbus_init_mmio(dev, &s->iomem);

    for (i = 0; i < 2; i++) {
        stream_reset(&s->streams[i]);
        s->streams[i].nr = i;
        s->streams[i].bh = qemu_bh_new(timer_hit, &s->streams[i]);
        s->streams[i].ptimer = ptimer_init(s->streams[i].bh);
        ptimer_set_freq(s->streams[i].ptimer, s->freqhz);
    }
    return 0;
}
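
/*
 * The "axistream-connected" link property points at the StreamSlave peer
 * that MM2S packets are pushed to (typically the attached AXI ethernet).
 */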
static void xilinx_axidma_initfn(Object *obj)
{
    struct XilinxAXIDMA *s = FROM_SYSBUS(typeof(*s), SYS_BUS_DEVICE(obj));

    object_property_add_link(obj, "axistream-connected", TYPE_STREAM_SLAVE,
                             (Object **) &s->tx_dev, NULL);
}

static Property axidma_properties[] = {
    DEFINE_PROP_UINT32("freqhz", struct XilinxAXIDMA, freqhz, 50000000),
    DEFINE_PROP_END_OF_LIST(),
};

static void axidma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
    StreamSlaveClass *ssc = STREAM_SLAVE_CLASS(klass);

    k->init = xilinx_axidma_init;
    dc->props = axidma_properties;
    ssc->push = axidma_push;
}

static const TypeInfo axidma_info = {
    .name          = "xlnx.axi-dma",
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(struct XilinxAXIDMA),
    .class_init    = axidma_class_init,
    .instance_init = xilinx_axidma_initfn,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SLAVE },
        { }
    }
};

static void xilinx_axidma_register_types(void)
{
    type_register_static(&axidma_info);
}

type_init(xilinx_axidma_register_types)