hw/xilinx_axidma.c @ 93f1e401

/*
 * QEMU model of Xilinx AXI-DMA block.
 *
 * Copyright (c) 2011 Edgar E. Iglesias.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "sysbus.h"
#include "qemu-char.h"
#include "qemu-timer.h"
#include "qemu-log.h"
#include "qdev-addr.h"

#include "xilinx_axidma.h"

#define D(x)

#define R_DMACR             (0x00 / 4)
#define R_DMASR             (0x04 / 4)
#define R_CURDESC           (0x08 / 4)
#define R_TAILDESC          (0x10 / 4)
#define R_MAX               (0x30 / 4)
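
/*
 * Each stream exposes a 0x30-byte register window; streamid_from_addr()
 * below maps offsets 0x00-0x2c onto stream 0 and 0x30-0x5c onto stream 1,
 * which presumably correspond to the hardware's MM2S and S2MM channels.
 */
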
enum {
    DMACR_RUNSTOP = 1,
    DMACR_TAILPTR_MODE = 2,
    DMACR_RESET = 4
};

enum {
    DMASR_HALTED = 1,
    DMASR_IDLE  = 2,
    DMASR_IOC_IRQ  = 1 << 12,
    DMASR_DLY_IRQ  = 1 << 13,

    DMASR_IRQ_MASK = 7 << 12
};

struct SDesc {
    uint64_t nxtdesc;
    uint64_t buffer_address;
    uint64_t reserved;
    uint32_t control;
    uint32_t status;
    uint32_t app[6];
};
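
/*
 * The struct above mirrors the in-memory scatter-gather descriptor layout
 * used by this model: descriptors are read and written little-endian
 * (see stream_desc_load()/stream_desc_store()), and the app[] words carry
 * per-packet sideband data exchanged with the attached stream client.
 */
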
enum {
    SDESC_CTRL_EOF = (1 << 26),
    SDESC_CTRL_SOF = (1 << 27),

    SDESC_CTRL_LEN_MASK = (1 << 23) - 1
};

enum {
    SDESC_STATUS_EOF = (1 << 26),
    SDESC_STATUS_SOF_BIT = 27,
    SDESC_STATUS_SOF = (1 << SDESC_STATUS_SOF_BIT),
    SDESC_STATUS_COMPLETE = (1 << 31)
};

struct AXIStream {
    QEMUBH *bh;
    ptimer_state *ptimer;
    qemu_irq irq;

    int nr;

    struct SDesc desc;
    int pos;
    unsigned int complete_cnt;
    uint32_t regs[R_MAX];
};

struct XilinxAXIDMA {
    SysBusDevice busdev;
    uint32_t freqhz;
    void *dmach;

    struct AXIStream streams[2];
};
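
/*
 * streams[0] appears to model the MM2S (memory to stream) channel: a write
 * to its TAILDESC register kicks off stream_process_mem2s().
 * streams[1] appears to model the S2MM (stream to memory) channel: data
 * pushed in by the client through axidma_push() is written to guest memory
 * by stream_process_s2mem().
 */
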
/*
 * Helper calls to extract info from descriptors and other trivial
 * state from regs.
 */
static inline int stream_desc_sof(struct SDesc *d)
{
    return d->control & SDESC_CTRL_SOF;
}

static inline int stream_desc_eof(struct SDesc *d)
{
    return d->control & SDESC_CTRL_EOF;
}

static inline int stream_resetting(struct AXIStream *s)
{
    return !!(s->regs[R_DMACR] & DMACR_RESET);
}

static inline int stream_running(struct AXIStream *s)
{
    return s->regs[R_DMACR] & DMACR_RUNSTOP;
}

static inline int stream_halted(struct AXIStream *s)
{
    return s->regs[R_DMASR] & DMASR_HALTED;
}

static inline int stream_idle(struct AXIStream *s)
{
    return !!(s->regs[R_DMASR] & DMASR_IDLE);
}

static void stream_reset(struct AXIStream *s)
{
    s->regs[R_DMASR] = DMASR_HALTED;  /* Starts up halted.  */
    s->regs[R_DMACR] = 1 << 16; /* Starts with one in compl threshold.  */
}

/* Map an offset addr into a channel index.  */
static inline int streamid_from_addr(target_phys_addr_t addr)
{
    int sid;

    sid = addr / (0x30);
    sid &= 1;
    return sid;
}

#ifdef DEBUG_ENET
static void stream_desc_show(struct SDesc *d)
{
    qemu_log("buffer_addr  = %" PRIx64 "\n", d->buffer_address);
    qemu_log("nxtdesc      = %" PRIx64 "\n", d->nxtdesc);
    qemu_log("control      = %x\n", d->control);
    qemu_log("status       = %x\n", d->status);
}
#endif

static void stream_desc_load(struct AXIStream *s, target_phys_addr_t addr)
{
    struct SDesc *d = &s->desc;
    int i;

    cpu_physical_memory_read(addr, (void *) d, sizeof *d);

    /* Convert from LE into host endianness.  */
    d->buffer_address = le64_to_cpu(d->buffer_address);
    d->nxtdesc = le64_to_cpu(d->nxtdesc);
    d->control = le32_to_cpu(d->control);
    d->status = le32_to_cpu(d->status);
    for (i = 0; i < ARRAY_SIZE(d->app); i++) {
        d->app[i] = le32_to_cpu(d->app[i]);
    }
}

static void stream_desc_store(struct AXIStream *s, target_phys_addr_t addr)
{
    struct SDesc *d = &s->desc;
    int i;

    /* Convert from host endianness into LE.  */
    d->buffer_address = cpu_to_le64(d->buffer_address);
    d->nxtdesc = cpu_to_le64(d->nxtdesc);
    d->control = cpu_to_le32(d->control);
    d->status = cpu_to_le32(d->status);
    for (i = 0; i < ARRAY_SIZE(d->app); i++) {
        d->app[i] = cpu_to_le32(d->app[i]);
    }
    cpu_physical_memory_write(addr, (void *) d, sizeof *d);
}

static void stream_update_irq(struct AXIStream *s)
{
    unsigned int pending, mask, irq;

    pending = s->regs[R_DMASR] & DMASR_IRQ_MASK;
    mask = s->regs[R_DMACR] & DMASR_IRQ_MASK;

    irq = pending & mask;

    qemu_set_irq(s->irq, !!irq);
}

static void stream_reload_complete_cnt(struct AXIStream *s)
{
    unsigned int comp_th;
    comp_th = (s->regs[R_DMACR] >> 16) & 0xff;
    s->complete_cnt = comp_th;
}
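
/*
 * Interrupt coalescing as modelled here: DMACR[23:16] holds the completion
 * threshold loaded into complete_cnt, and DMACR[31:24] holds the delay
 * timeout loaded into the ptimer.  The IOC interrupt is raised after
 * complete_cnt descriptors complete, and the delay interrupt is raised
 * when the timer expires.
 */
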
static void timer_hit(void *opaque)
{
    struct AXIStream *s = opaque;

    stream_reload_complete_cnt(s);
    s->regs[R_DMASR] |= DMASR_DLY_IRQ;
    stream_update_irq(s);
}

static void stream_complete(struct AXIStream *s)
{
    unsigned int comp_delay;

    /* Start the delayed timer.  */
    comp_delay = s->regs[R_DMACR] >> 24;
    if (comp_delay) {
        ptimer_stop(s->ptimer);
        ptimer_set_count(s->ptimer, comp_delay);
        ptimer_run(s->ptimer, 1);
    }

    s->complete_cnt--;
    if (s->complete_cnt == 0) {
        /* Raise the IOC irq.  */
        s->regs[R_DMASR] |= DMASR_IOC_IRQ;
        stream_reload_complete_cnt(s);
    }
}
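
/*
 * Walk the MM2S descriptor ring starting at CURDESC: gather buffers into
 * txbuf until a descriptor with the EOF bit is seen, then hand the
 * assembled packet (plus the app words latched at SOF) to the attached
 * client.  The walk stops at an already-completed descriptor or once
 * TAILDESC has been processed.
 */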
static void stream_process_mem2s(struct AXIStream *s,
                                 struct XilinxDMAConnection *dmach)
{
    uint32_t prev_d;
    unsigned char txbuf[16 * 1024];
    unsigned int txlen;
    uint32_t app[6];

    if (!stream_running(s) || stream_idle(s)) {
        return;
    }

    while (1) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }

        if (stream_desc_sof(&s->desc)) {
            s->pos = 0;
            memcpy(app, s->desc.app, sizeof app);
        }

        txlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if ((txlen + s->pos) > sizeof txbuf) {
            hw_error("%s: too small internal txbuf! %d\n", __func__,
                     txlen + s->pos);
        }

        cpu_physical_memory_read(s->desc.buffer_address,
                                 txbuf + s->pos, txlen);
        s->pos += txlen;

        if (stream_desc_eof(&s->desc)) {
            xlx_dma_push_to_client(dmach, txbuf, s->pos, app);
            s->pos = 0;
            stream_complete(s);
        }

        /* Update the descriptor.  */
        s->desc.status = txlen | SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);

        /* Advance.  */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }
}
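
/*
 * Write an incoming packet from the stream client into guest memory,
 * spreading it over the S2MM descriptor ring from CURDESC onwards.
 * The first descriptor written is flagged SOF and the last EOF; the app
 * words are stored into the final descriptor.
 */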
static void stream_process_s2mem(struct AXIStream *s,
                                 unsigned char *buf, size_t len, uint32_t *app)
{
    uint32_t prev_d;
    unsigned int rxlen;
    int pos = 0;
    int sof = 1;

    if (!stream_running(s) || stream_idle(s)) {
        return;
    }

    while (len) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }

        rxlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if (rxlen > len) {
            /* The remaining data fits in this descriptor's buffer.  */
            rxlen = len;
        }

        cpu_physical_memory_write(s->desc.buffer_address, buf + pos, rxlen);
        len -= rxlen;
        pos += rxlen;

        /* Update the descriptor.  */
        if (!len) {
            int i;

            stream_complete(s);
            for (i = 0; i < 5; i++) {
                s->desc.app[i] = app[i];
            }
            s->desc.status |= SDESC_STATUS_EOF;
        }

        s->desc.status |= sof << SDESC_STATUS_SOF_BIT;
        s->desc.status |= SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);
        sof = 0;

        /* Advance.  */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }
}
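
/*
 * Receive callback handed to the DMA connection (see xlx_dma_connect_dma()
 * in the init function); the attached client calls it to push data into
 * the S2MM channel, streams[1].
 */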
static
void axidma_push(void *opaque, unsigned char *buf, size_t len, uint32_t *app)
{
    struct XilinxAXIDMA *d = opaque;
    struct AXIStream *s = &d->streams[1];

    if (!app) {
        hw_error("No stream app data!\n");
    }
    stream_process_s2mem(s, buf, len, app);
    stream_update_irq(s);
}

static uint32_t axidma_readl(void *opaque, target_phys_addr_t addr)
{
    struct XilinxAXIDMA *d = opaque;
    struct AXIStream *s;
    uint32_t r = 0;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
        case R_DMACR:
            /* Simulate a one-cycle reset delay.  */
            s->regs[addr] &= ~DMACR_RESET;
            r = s->regs[addr];
            break;
        case R_DMASR:
            s->regs[addr] &= 0xffff;
            s->regs[addr] |= (s->complete_cnt & 0xff) << 16;
            s->regs[addr] |= (ptimer_get_count(s->ptimer) & 0xff) << 24;
            r = s->regs[addr];
            break;
        default:
            r = s->regs[addr];
            D(qemu_log("%s ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                       __func__, sid, addr * 4, r));
            break;
    }
    return r;
}

static void
axidma_writel(void *opaque, target_phys_addr_t addr, uint32_t value)
{
    struct XilinxAXIDMA *d = opaque;
    struct AXIStream *s;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
        case R_DMACR:
            /* Tailptr mode is always on.  */
            value |= DMACR_TAILPTR_MODE;
            /* Remember our previous reset state.  */
            value |= (s->regs[addr] & DMACR_RESET);
            s->regs[addr] = value;

            if (value & DMACR_RESET) {
                stream_reset(s);
            }

            if ((value & 1) && !stream_resetting(s)) {
                /* Start processing.  */
                s->regs[R_DMASR] &= ~(DMASR_HALTED | DMASR_IDLE);
            }
            stream_reload_complete_cnt(s);
            break;

        case R_DMASR:
            /* Don't allow the guest to directly write the irq status bits.  */
            value &= ~(value & DMASR_IRQ_MASK);
            s->regs[addr] = value;
            break;

        case R_TAILDESC:
            s->regs[addr] = value;
            s->regs[R_DMASR] &= ~DMASR_IDLE; /* Not idle.  */
            if (!sid) {
                stream_process_mem2s(s, d->dmach);
            }
            break;
        default:
            D(qemu_log("%s: ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                       __func__, sid, addr * 4, value));
            s->regs[addr] = value;
            break;
    }
    stream_update_irq(s);
}
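
/*
 * The register file is modelled as 32-bit registers, so the same handler
 * is registered for 8, 16 and 32-bit accesses.
 */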
static CPUReadMemoryFunc * const axidma_read[] = {
    &axidma_readl,
    &axidma_readl,
    &axidma_readl,
};

static CPUWriteMemoryFunc * const axidma_write[] = {
    &axidma_writel,
    &axidma_writel,
    &axidma_writel,
};

static int xilinx_axidma_init(SysBusDevice *dev)
{
    struct XilinxAXIDMA *s = FROM_SYSBUS(typeof(*s), dev);
    int axidma_regs;
    int i;

    sysbus_init_irq(dev, &s->streams[1].irq);
    sysbus_init_irq(dev, &s->streams[0].irq);

    if (!s->dmach) {
        hw_error("Unconnected DMA channel.\n");
    }

    xlx_dma_connect_dma(s->dmach, s, axidma_push);

    axidma_regs = cpu_register_io_memory(axidma_read, axidma_write, s,
                                         DEVICE_NATIVE_ENDIAN);
    sysbus_init_mmio(dev, R_MAX * 4 * 2, axidma_regs);

    for (i = 0; i < 2; i++) {
        stream_reset(&s->streams[i]);
        s->streams[i].nr = i;
        s->streams[i].bh = qemu_bh_new(timer_hit, &s->streams[i]);
        s->streams[i].ptimer = ptimer_init(s->streams[i].bh);
        ptimer_set_freq(s->streams[i].ptimer, s->freqhz);
    }
    return 0;
}
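
/*
 * Device registration.  "freqhz" sets the ptimer frequency used for the
 * delay interrupt, and "dmach" is expected to point at the
 * XilinxDMAConnection shared with the stream client, presumably set up by
 * the board code that instantiates the device.
 */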
static SysBusDeviceInfo axidma_info = {
    .init = xilinx_axidma_init,
    .qdev.name  = "xilinx,axidma",
    .qdev.size  = sizeof(struct XilinxAXIDMA),
    .qdev.props = (Property[]) {
        DEFINE_PROP_UINT32("freqhz", struct XilinxAXIDMA, freqhz, 50000000),
        DEFINE_PROP_PTR("dmach", struct XilinxAXIDMA, dmach),
        DEFINE_PROP_END_OF_LIST(),
    }
};

static void xilinx_axidma_register(void)
{
    sysbus_register_withprop(&axidma_info);
}

device_init(xilinx_axidma_register)