Statistics
| Branch: | Revision:

root / hw / dma.c @ 58229933

History | View | Annotate | Download (15.2 kB)

1
/*
2
 * QEMU DMA emulation
3
 *
4
 * Copyright (c) 2003-2004 Vassili Karpov (malc)
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a copy
7
 * of this software and associated documentation files (the "Software"), to deal
8
 * in the Software without restriction, including without limitation the rights
9
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10
 * copies of the Software, and to permit persons to whom the Software is
11
 * furnished to do so, subject to the following conditions:
12
 *
13
 * The above copyright notice and this permission notice shall be included in
14
 * all copies or substantial portions of the Software.
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22
 * THE SOFTWARE.
23
 */
24
#include "hw.h"
25
#include "isa.h"
26

    
27
/* #define DEBUG_DMA */

/* dolog always prints diagnostics to stderr; linfo/ldebug only emit
   output when DEBUG_DMA is defined, otherwise they compile to nothing. */
#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__)
#ifdef DEBUG_DMA
#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
#else
#define linfo(...)
#define ldebug(...)
#endif
37

    
38
/* State of one i8257 DMA channel. */
struct dma_regs {
    int now[2];        /* live address (now[ADDR]) and transfer position (now[COUNT]) */
    uint16_t base[2];  /* programmed base address and base count registers */
    uint8_t mode;      /* mode register (bit 5 = address decrement, bits 6-7 = op mode) */
    uint8_t page;      /* page register: physical address bits 16-23 */
    uint8_t pageh;     /* high page register: physical address bits 24-30 */
    uint8_t dack;      /* DACK line state (migrated but unused by logic here) */
    uint8_t eop;       /* EOP line state (migrated but unused by logic here) */
    DMA_transfer_handler transfer_handler;  /* device callback that moves the data */
    void *opaque;      /* opaque argument passed back to transfer_handler */
};
49

    
50
/* Indices into dma_regs.now[] / dma_regs.base[]. */
#define ADDR 0
#define COUNT 1
52

    
53
/* One of the two cascaded controllers: [0] handles 8-bit channels 0-3,
   [1] handles 16-bit channels 4-7. */
static struct dma_cont {
    uint8_t status;     /* bits 0-3: terminal count, bits 4-7: channel requests */
    uint8_t command;    /* command register; unsupported bits are refused on write */
    uint8_t mask;       /* per-channel mask bits; masked channels do not run */
    uint8_t flip_flop;  /* selects low/high byte for 16-bit register accesses */
    int dshift;         /* 0 for the 8-bit controller, 1 for the 16-bit one */
    struct dma_regs regs[4];
    qemu_irq *cpu_request_exit;  /* pulsed by DMA_schedule to kick the CPU loop */
    MemoryRegion channel_io;     /* channel address/count register bank */
    MemoryRegion cont_io;        /* controller command/status register bank */
} dma_controllers[2];
64

    
65
/* i8257 command-register bits.  Only the default configuration is
   emulated; CMD_NOT_SUPPORTED collects every bit write_cont rejects. */
enum {
    CMD_MEMORY_TO_MEMORY = 0x01,
    CMD_FIXED_ADDRESS    = 0x02,
    CMD_BLOCK_CONTROLLER = 0x04,
    CMD_COMPRESSED_TIME  = 0x08,
    CMD_CYCLIC_PRIORITY  = 0x10,
    CMD_EXTENDED_WRITE   = 0x20,
    CMD_LOW_DREQ         = 0x40,
    CMD_LOW_DACK         = 0x80,
    CMD_NOT_SUPPORTED    = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
    | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
    | CMD_LOW_DREQ | CMD_LOW_DACK

};
79

    
80
static void DMA_run (void);

/* Maps a page-register port offset (nport & 7) to its channel number;
   -1 marks ports that do not correspond to any channel. */
static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};
83

    
84
/* Write the low page register (address bits 16-23) of the channel
   addressed by I/O port nport; stray ports are logged and ignored. */
static void write_page (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan = channels[nport & 7];

    if (ichan == -1) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].page = data;
}
96

    
97
/* Write the high page register (address bits 24-30) of the channel
   addressed by I/O port nport; stray ports are logged and ignored. */
static void write_pageh (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan = channels[nport & 7];

    if (ichan == -1) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].pageh = data;
}
109

    
110
/* Read back the low page register of the channel addressed by nport;
   stray ports read as 0. */
static uint32_t read_page (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan = channels[nport & 7];

    if (ichan == -1) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].page;
}
122

    
123
/* Read back the high page register of the channel addressed by nport;
   stray ports read as 0. */
static uint32_t read_pageh (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan = channels[nport & 7];

    if (ichan == -1) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].pageh;
}
135

    
136
static inline void init_chan (struct dma_cont *d, int ichan)
137
{
138
    struct dma_regs *r;
139

    
140
    r = d->regs + ichan;
141
    r->now[ADDR] = r->base[ADDR] << d->dshift;
142
    r->now[COUNT] = 0;
143
}
144

    
145
static inline int getff (struct dma_cont *d)
146
{
147
    int ff;
148

    
149
    ff = d->flip_flop;
150
    d->flip_flop = !ff;
151
    return ff;
152
}
153

    
154
/* Read one byte of a channel's current-address or remaining-count
   register; the flip-flop selects the low byte on the first access
   and the high byte on the second. */
static uint64_t read_chan(void *opaque, hwaddr nport, unsigned size)
{
    struct dma_cont *d = opaque;
    int ichan, nreg, iport, ff, val, dir;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;  /* each channel owns an even/odd port pair */
    nreg = iport & 1;    /* 0 = address register, 1 = count register */
    r = d->regs + ichan;

    /* mode bit 5 set means the address counts downwards */
    dir = ((r->mode >> 5) & 1) ? -1 : 1;
    ff = getff (d);
    if (nreg)
        val = (r->base[COUNT] << d->dshift) - r->now[COUNT];
    else
        val = r->now[ADDR] + r->now[COUNT] * dir;

    ldebug ("read_chan %#x -> %d\n", iport, val);
    /* Scale back to register units and extract the flip-flop's byte. */
    return (val >> (d->dshift + (ff << 3))) & 0xff;
}
175

    
176
/* Write one byte of a channel's base address or base count register.
   The flip-flop distinguishes the first (low byte) from the second
   (high byte) access; a completed address write re-initializes the
   channel's live counters. */
static void write_chan(void *opaque, hwaddr nport, uint64_t data,
                       unsigned size)
{
    struct dma_cont *d = opaque;
    int port = (nport >> d->dshift) & 0x0f;
    int chan = port >> 1;
    int reg = port & 1;
    struct dma_regs *r = &d->regs[chan];

    if (getff (d)) {
        /* Second access: high byte; the 16-bit value is now complete. */
        r->base[reg] = (r->base[reg] & 0xff) | ((data << 8) & 0xff00);
        init_chan (d, chan);
    } else {
        /* First access: low byte. */
        r->base[reg] = (r->base[reg] & 0xff00) | (data & 0xff);
    }
}
194

    
195
/* Write a controller register (command, request, mask, mode, flip-flop,
   reset).  Register selection uses the port offset scaled by dshift so
   the same layout serves the 8-bit and 16-bit controllers. */
static void write_cont(void *opaque, hwaddr nport, uint64_t data,
                       unsigned size)
{
    struct dma_cont *d = opaque;
    int iport, ichan = 0;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x01:                  /* command */
        if ((data != 0) && (data & CMD_NOT_SUPPORTED)) {
            dolog("command %"PRIx64" not supported\n", data);
            return;
        }
        d->command = data;
        break;

    case 0x02:                  /* request register: set/clear a channel's DREQ */
        ichan = data & 3;
        if (data & 4) {
            d->status |= 1 << (ichan + 4);
        }
        else {
            d->status &= ~(1 << (ichan + 4));
        }
        d->status &= ~(1 << ichan);  /* clear the channel's terminal-count bit */
        DMA_run();
        break;

    case 0x03:                  /* single mask */
        if (data & 4)
            d->mask |= 1 << (data & 3);
        else
            d->mask &= ~(1 << (data & 3));
        DMA_run();
        break;

    case 0x04:                  /* mode */
        {
            ichan = data & 3;
#ifdef DEBUG_DMA
            {
                int op, ai, dir, opmode;
                op = (data >> 2) & 3;
                ai = (data >> 4) & 1;
                dir = (data >> 5) & 1;
                opmode = (data >> 6) & 3;

                linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
                       ichan, op, ai, dir, opmode);
            }
#endif
            d->regs[ichan].mode = data;
            break;
        }

    case 0x05:                  /* clear flip flop */
        d->flip_flop = 0;
        break;

    case 0x06:                  /* reset: clears state and masks every channel */
        d->flip_flop = 0;
        d->mask = ~0;
        d->status = 0;
        d->command = 0;
        break;

    case 0x07:                  /* clear mask for all channels */
        d->mask = 0;
        DMA_run();
        break;

    case 0x08:                  /* write mask for all channels */
        d->mask = data;
        DMA_run();
        break;

    default:
        dolog ("unknown iport %#x\n", iport);
        break;
    }

#ifdef DEBUG_DMA
    if (0xc != iport) {
        linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n",
               nport, ichan, data);
    }
#endif
}
283

    
284
/* Read a controller register.  Only status and mask are implemented;
   every other port reads as 0. */
static uint64_t read_cont(void *opaque, hwaddr nport, unsigned size)
{
    struct dma_cont *d = opaque;
    int iport = (nport >> d->dshift) & 0x0f;
    int val;

    if (iport == 0x08) {
        /* status: reading clears the terminal-count bits (low nibble) */
        val = d->status;
        d->status &= 0xf0;
    } else if (iport == 0x0f) {
        /* mask register */
        val = d->mask;
    } else {
        val = 0;
    }

    ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, iport, val);
    return val;
}
306

    
307
int DMA_get_channel_mode (int nchan)
308
{
309
    return dma_controllers[nchan > 3].regs[nchan & 3].mode;
310
}
311

    
312
void DMA_hold_DREQ (int nchan)
313
{
314
    int ncont, ichan;
315

    
316
    ncont = nchan > 3;
317
    ichan = nchan & 3;
318
    linfo ("held cont=%d chan=%d\n", ncont, ichan);
319
    dma_controllers[ncont].status |= 1 << (ichan + 4);
320
    DMA_run();
321
}
322

    
323
void DMA_release_DREQ (int nchan)
324
{
325
    int ncont, ichan;
326

    
327
    ncont = nchan > 3;
328
    ichan = nchan & 3;
329
    linfo ("released cont=%d chan=%d\n", ncont, ichan);
330
    dma_controllers[ncont].status &= ~(1 << (ichan + 4));
331
    DMA_run();
332
}
333

    
334
/* Invoke the device's transfer handler for channel ichan of controller
   ncont, passing the current position and total transfer size, and
   record the new position the handler returns. */
static void channel_run (int ncont, int ichan)
{
    int n;
    struct dma_regs *r = &dma_controllers[ncont].regs[ichan];
#ifdef DEBUG_DMA
    int dir, opmode;

    dir = (r->mode >> 5) & 1;
    opmode = (r->mode >> 6) & 3;

    if (dir) {
        dolog ("DMA in address decrement mode\n");
    }
    if (opmode != 1) {
        dolog ("DMA not in single mode select %#x\n", opmode);
    }
#endif

    /* Transfer size in bytes: the programmed count is in units of the
       controller's transfer width (ncont == 1 is the 16-bit controller). */
    n = r->transfer_handler (r->opaque, ichan + (ncont << 2),
                             r->now[COUNT], (r->base[COUNT] + 1) << ncont);
    r->now[COUNT] = n;
    ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
}
357

    
358
/* Bottom half used to re-run pending transfers outside the I/O handlers. */
static QEMUBH *dma_bh;
359

    
360
/* Run every unmasked channel with a pending request on both controllers.
   A static flag guards against reentrancy (a transfer handler may raise
   DREQ and call back into DMA_run); on reentry, or whenever a channel
   made progress, the bottom half is scheduled to try again later. */
static void DMA_run (void)
{
    struct dma_cont *d;
    int icont, ichan;
    int rearm = 0;
    static int running = 0;

    if (running) {
        rearm = 1;
        goto out;
    } else {
        running = 1;
    }

    d = dma_controllers;

    for (icont = 0; icont < 2; icont++, d++) {
        for (ichan = 0; ichan < 4; ichan++) {
            int mask;

            mask = 1 << ichan;

            /* channel unmasked and its request (DREQ) status bit set */
            if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4)))) {
                channel_run (icont, ichan);
                rearm = 1;
            }
        }
    }

    running = 0;
out:
    if (rearm)
        qemu_bh_schedule_idle(dma_bh);
}
394

    
395
/* Bottom-half trampoline: retry pending transfers outside I/O context. */
static void DMA_run_bh(void *unused)
{
    DMA_run();
}
399

    
400
void DMA_register_channel (int nchan,
401
                           DMA_transfer_handler transfer_handler,
402
                           void *opaque)
403
{
404
    struct dma_regs *r;
405
    int ichan, ncont;
406

    
407
    ncont = nchan > 3;
408
    ichan = nchan & 3;
409

    
410
    r = dma_controllers[ncont].regs + ichan;
411
    r->transfer_handler = transfer_handler;
412
    r->opaque = opaque;
413
}
414

    
415
int DMA_read_memory (int nchan, void *buf, int pos, int len)
416
{
417
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
418
    hwaddr addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];
419

    
420
    if (r->mode & 0x20) {
421
        int i;
422
        uint8_t *p = buf;
423

    
424
        cpu_physical_memory_read (addr - pos - len, buf, len);
425
        /* What about 16bit transfers? */
426
        for (i = 0; i < len >> 1; i++) {
427
            uint8_t b = p[len - i - 1];
428
            p[i] = b;
429
        }
430
    }
431
    else
432
        cpu_physical_memory_read (addr + pos, buf, len);
433

    
434
    return len;
435
}
436

    
437
int DMA_write_memory (int nchan, void *buf, int pos, int len)
438
{
439
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
440
    hwaddr addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];
441

    
442
    if (r->mode & 0x20) {
443
        int i;
444
        uint8_t *p = buf;
445

    
446
        cpu_physical_memory_write (addr - pos - len, buf, len);
447
        /* What about 16bit transfers? */
448
        for (i = 0; i < len; i++) {
449
            uint8_t b = p[len - i - 1];
450
            p[i] = b;
451
        }
452
    }
453
    else
454
        cpu_physical_memory_write (addr + pos, buf, len);
455

    
456
    return len;
457
}
458

    
459
/* request the emulator to transfer a new DMA memory block ASAP */
460
void DMA_schedule(int nchan)
461
{
462
    struct dma_cont *d = &dma_controllers[nchan > 3];
463

    
464
    qemu_irq_pulse(*d->cpu_request_exit);
465
}
466

    
467
static void dma_reset(void *opaque)
468
{
469
    struct dma_cont *d = opaque;
470
    write_cont(d, (0x06 << d->dshift), 0, 1);
471
}
472

    
473
/* Default transfer handler for channels no device has claimed: log the
   stray access and report no progress (returns dma_pos unchanged). */
static int dma_phony_handler (void *opaque, int nchan, int dma_pos, int dma_len)
{
    dolog ("unregistered DMA channel used nchan=%d dma_pos=%d dma_len=%d\n",
           nchan, dma_pos, dma_len);
    return dma_pos;
}
479

    
480

    
481
/* I/O ops for the channel address/count register bank; accesses are
   split into single bytes by the memory core (impl 1..1). */
static const MemoryRegionOps channel_io_ops = {
    .read = read_chan,
    .write = write_chan,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 1,
    },
};
490

    
491
/* IOport from page_base */
/* Low page registers: three ports at offset 0x01 plus one at 0x07;
   the channels[] table maps the offset to a channel. */
static const MemoryRegionPortio page_portio_list[] = {
    { 0x01, 3, 1, .write = write_page, .read = read_page, },
    { 0x07, 1, 1, .write = write_page, .read = read_page, },
    PORTIO_END_OF_LIST(),
};
497

    
498
/* IOport from pageh_base */
/* High page registers.  NOTE(review): the second entry covers 3 ports
   where the low-page list covers only 1 at offset 0x07 — looks
   intentional asymmetry in the original; confirm against hardware docs. */
static const MemoryRegionPortio pageh_portio_list[] = {
    { 0x01, 3, 1, .write = write_pageh, .read = read_pageh, },
    { 0x07, 3, 1, .write = write_pageh, .read = read_pageh, },
    PORTIO_END_OF_LIST(),
};
504

    
505
/* I/O ops for the controller command/status register bank; accesses
   are split into single bytes by the memory core (impl 1..1). */
static const MemoryRegionOps cont_io_ops = {
    .read = read_cont,
    .write = write_cont,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 1,
    },
};
514

    
515
/* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */
/* Wire one controller into the ISA I/O space: channel registers at
   base, command/status registers right after them, page registers at
   page_base, and (optionally) high page registers at pageh_base. */
static void dma_init2(struct dma_cont *d, int base, int dshift,
                      int page_base, int pageh_base,
                      qemu_irq *cpu_request_exit)
{
    int i;

    d->dshift = dshift;
    d->cpu_request_exit = cpu_request_exit;

    /* Channel address/count bank occupies 8 << dshift ports from base. */
    memory_region_init_io(&d->channel_io, &channel_io_ops, d,
                          "dma-chan", 8 << d->dshift);
    memory_region_add_subregion(isa_address_space_io(NULL),
                                base, &d->channel_io);

    isa_register_portio_list(NULL, page_base, page_portio_list, d,
                             "dma-page");
    /* pageh_base < 0 disables the high page registers (see DMA_init). */
    if (pageh_base >= 0) {
        isa_register_portio_list(NULL, pageh_base, pageh_portio_list, d,
                                 "dma-pageh");
    }

    /* Controller registers follow immediately after the channel bank. */
    memory_region_init_io(&d->cont_io, &cont_io_ops, d, "dma-cont",
                          8 << d->dshift);
    memory_region_add_subregion(isa_address_space_io(NULL),
                                base + (8 << d->dshift), &d->cont_io);

    qemu_register_reset(dma_reset, d);
    dma_reset(d);
    /* Unclaimed channels get a handler that only logs stray accesses. */
    for (i = 0; i < ARRAY_SIZE (d->regs); ++i) {
        d->regs[i].transfer_handler = dma_phony_handler;
    }
}
548

    
549
/* Migration layout for one channel; must stay in sync with struct dma_regs. */
static const VMStateDescription vmstate_dma_regs = {
    .name = "dma_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields      = (VMStateField []) {
        VMSTATE_INT32_ARRAY(now, struct dma_regs, 2),
        VMSTATE_UINT16_ARRAY(base, struct dma_regs, 2),
        VMSTATE_UINT8(mode, struct dma_regs),
        VMSTATE_UINT8(page, struct dma_regs),
        VMSTATE_UINT8(pageh, struct dma_regs),
        VMSTATE_UINT8(dack, struct dma_regs),
        VMSTATE_UINT8(eop, struct dma_regs),
        VMSTATE_END_OF_LIST()
    }
};
565

    
566
/* After migration, kick the controllers in case a transfer was pending
   when the state was saved.  Always succeeds. */
static int dma_post_load(void *opaque, int version_id)
{
    DMA_run();

    return 0;
}
572

    
573
/* Migration layout for one controller.  NOTE(review): the status
   register is not migrated — presumably acceptable because post_load
   re-runs pending DMA; confirm before changing. */
static const VMStateDescription vmstate_dma = {
    .name = "dma",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = dma_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT8(command, struct dma_cont),
        VMSTATE_UINT8(mask, struct dma_cont),
        VMSTATE_UINT8(flip_flop, struct dma_cont),
        VMSTATE_INT32(dshift, struct dma_cont),
        VMSTATE_STRUCT_ARRAY(regs, struct dma_cont, 4, 1, vmstate_dma_regs, struct dma_regs),
        VMSTATE_END_OF_LIST()
    }
};
588

    
589
/* Create both ISA DMA controllers: the 8-bit one at ports 0x00 (page
   registers at 0x80) and the 16-bit one at 0xc0 (page registers at
   0x88).  high_page_enable additionally maps the high page registers
   at 0x480/0x488; both share the cpu_request_exit line. */
void DMA_init(int high_page_enable, qemu_irq *cpu_request_exit)
{
    dma_init2(&dma_controllers[0], 0x00, 0, 0x80,
              high_page_enable ? 0x480 : -1, cpu_request_exit);
    dma_init2(&dma_controllers[1], 0xc0, 1, 0x88,
              high_page_enable ? 0x488 : -1, cpu_request_exit);
    vmstate_register (NULL, 0, &vmstate_dma, &dma_controllers[0]);
    vmstate_register (NULL, 1, &vmstate_dma, &dma_controllers[1]);

    dma_bh = qemu_bh_new(DMA_run_bh, NULL);
}