hw/dma.c @ 99a0949b
/*
 * QEMU DMA emulation
 *
 * Copyright (c) 2003-2004 Vassili Karpov (malc)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "hw.h"
#include "isa.h"

/* #define DEBUG_DMA */

#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__)
#ifdef DEBUG_DMA
#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
#else
#define linfo(...)
#define ldebug(...)
#endif

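/* Per-channel state: the current and programmed address/count pairs,
   the mode and page registers, and the transfer callback registered by
   the device that owns the channel. */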
struct dma_regs {
    int now[2];
    uint16_t base[2];
    uint8_t mode;
    uint8_t page;
    uint8_t pageh;
    uint8_t dack;
    uint8_t eop;
    DMA_transfer_handler transfer_handler;
    void *opaque;
};

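/* Indices into the now[]/base[] pairs above. */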
#define ADDR 0
#define COUNT 1

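/* One i8237 controller.  dshift is 0 for the 8-bit controller and 1
   for the 16-bit one; it doubles as the I/O port stride. */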
static struct dma_cont {
    uint8_t status;
    uint8_t command;
    uint8_t mask;
    uint8_t flip_flop;
    int dshift;
    struct dma_regs regs[4];
} dma_controllers[2];

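/* Command register bits.  Only CMD_BLOCK_CONTROLLER (controller
   disable) is accepted; the rest are rejected in write_cont. */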
enum {
    CMD_MEMORY_TO_MEMORY = 0x01,
    CMD_FIXED_ADDRESS    = 0x02,
    CMD_BLOCK_CONTROLLER = 0x04,
    CMD_COMPRESSED_TIME  = 0x08,
    CMD_CYCLIC_PRIORITY  = 0x10,
    CMD_EXTENDED_WRITE   = 0x20,
    CMD_LOW_DREQ         = 0x40,
    CMD_LOW_DACK         = 0x80,
    CMD_NOT_SUPPORTED    = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
    | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
    | CMD_LOW_DREQ | CMD_LOW_DACK
};

static void DMA_run (void);

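/* Map the low three bits of a page-register port to its channel:
   0x87 -> 0, 0x83 -> 1, 0x81 -> 2, 0x82 -> 3.  -1 marks ports that do
   not belong to any channel. */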
static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};

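/* Page registers: the page byte supplies physical address bits 16-23,
   and pageh (the EISA high page register) supplies bits 24-30. */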
static void write_page (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].page = data;
}

static void write_pageh (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].pageh = data;
}

static uint32_t read_page (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].page;
}

static uint32_t read_pageh (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].pageh;
}

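/* Reload a channel's working address from its programmed base and
   reset the transfer position; called when the high byte of a base
   register is rewritten. */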
static inline void init_chan (struct dma_cont *d, int ichan)
{
    struct dma_regs *r;

    r = d->regs + ichan;
    r->now[ADDR] = r->base[ADDR] << d->dshift;
    r->now[COUNT] = 0;
}

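/* Return the byte-pointer flip-flop and toggle it; it selects the low
   or high byte of the 16-bit address and count registers. */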
static inline int getff (struct dma_cont *d)
{
    int ff;

    ff = d->flip_flop;
    d->flip_flop = !ff;
    return ff;
}

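/* Read back a channel's current address or remaining count, one byte
   per access as selected by the flip-flop. */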
static uint32_t read_chan (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan, nreg, iport, ff, val, dir;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;

    dir = ((r->mode >> 5) & 1) ? -1 : 1;
    ff = getff (d);
    if (nreg)
        val = (r->base[COUNT] << d->dshift) - r->now[COUNT];
    else
        val = r->now[ADDR] + r->now[COUNT] * dir;

    ldebug ("read_chan %#x -> %d\n", iport, val);
    return (val >> (d->dshift + (ff << 3))) & 0xff;
}

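/* Program a channel's base address or count, low byte first; writing
   the high byte also reloads the working registers via init_chan. */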
static void write_chan (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan, nreg;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;
    if (getff (d)) {
        r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
        init_chan (d, ichan);
    } else {
        r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
    }
}

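/* Handle writes to the controller's command, request, mask, mode,
   flip-flop and reset ports. */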
static void write_cont (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan = 0;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08:                  /* command */
        if ((data != 0) && (data & CMD_NOT_SUPPORTED)) {
            dolog ("command %#x not supported\n", data);
            return;
        }
        d->command = data;
        break;

    case 0x09:                  /* request */
        ichan = data & 3;
        if (data & 4) {
            d->status |= 1 << (ichan + 4);
        }
        else {
            d->status &= ~(1 << (ichan + 4));
        }
        d->status &= ~(1 << ichan);
        DMA_run();
        break;

    case 0x0a:                  /* single mask */
        if (data & 4)
            d->mask |= 1 << (data & 3);
        else
            d->mask &= ~(1 << (data & 3));
        DMA_run();
        break;

    case 0x0b:                  /* mode */
        {
            ichan = data & 3;
#ifdef DEBUG_DMA
            {
                int op, ai, dir, opmode;
                op = (data >> 2) & 3;
                ai = (data >> 4) & 1;
                dir = (data >> 5) & 1;
                opmode = (data >> 6) & 3;

                linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
                       ichan, op, ai, dir, opmode);
            }
#endif
            d->regs[ichan].mode = data;
            break;
        }

    case 0x0c:                  /* clear flip flop */
        d->flip_flop = 0;
        break;

    case 0x0d:                  /* reset */
        d->flip_flop = 0;
        d->mask = ~0;
        d->status = 0;
        d->command = 0;
        break;

    case 0x0e:                  /* clear mask for all channels */
        d->mask = 0;
        DMA_run();
        break;

    case 0x0f:                  /* write mask for all channels */
        d->mask = data;
        DMA_run();
        break;

    default:
        dolog ("unknown iport %#x\n", iport);
        break;
    }

#ifdef DEBUG_DMA
    if (0xc != iport) {
        linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n",
               nport, ichan, data);
    }
#endif
}

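/* Handle reads of the controller's status and mask ports; reading the
   status register clears the terminal-count bits. */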
static uint32_t read_cont (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int iport, val;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08:                  /* status */
        val = d->status;
        d->status &= 0xf0;
        break;
    case 0x0f:                  /* mask */
        val = d->mask;
        break;
    default:
        val = 0;
        break;
    }

    ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, iport, val);
    return val;
}

int DMA_get_channel_mode (int nchan)
{
    return dma_controllers[nchan > 3].regs[nchan & 3].mode;
}

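/* Device-side request lines: holding DREQ sets the channel's request
   bit in the status register and kicks the controller; releasing it
   clears the bit again. */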
void DMA_hold_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("held cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status |= 1 << (ichan + 4);
    DMA_run();
}

void DMA_release_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("released cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status &= ~(1 << (ichan + 4));
    DMA_run();
}

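/* Run one channel: pass the current position and the total transfer
   size (scaled for the 16-bit controller) to the registered handler,
   which returns the new position. */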
static void channel_run (int ncont, int ichan)
{
    int n;
    struct dma_regs *r = &dma_controllers[ncont].regs[ichan];
#ifdef DEBUG_DMA
    int dir, opmode;

    dir = (r->mode >> 5) & 1;
    opmode = (r->mode >> 6) & 3;

    if (dir) {
        dolog ("DMA in address decrement mode\n");
    }
    if (opmode != 1) {
        dolog ("DMA not in single mode select %#x\n", opmode);
    }
#endif

    n = r->transfer_handler (r->opaque, ichan + (ncont << 2),
                             r->now[COUNT], (r->base[COUNT] + 1) << ncont);
    r->now[COUNT] = n;
    ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
}

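/* Bottom half used to keep servicing channels outside the current I/O
   callback. */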
static QEMUBH *dma_bh;

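/* Service every unmasked channel with a pending request on both
   controllers; if any channel ran, reschedule through the bottom half
   so the transfer can continue later. */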
static void DMA_run (void)
{
    struct dma_cont *d;
    int icont, ichan;
    int rearm = 0;

    d = dma_controllers;

    for (icont = 0; icont < 2; icont++, d++) {
        for (ichan = 0; ichan < 4; ichan++) {
            int mask;

            mask = 1 << ichan;

            if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4)))) {
                channel_run (icont, ichan);
                rearm = 1;
            }
        }
    }

    if (rearm)
        qemu_bh_schedule_idle(dma_bh);
}

static void DMA_run_bh(void *unused)
{
    DMA_run();
}

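/* Attach a device's transfer callback to a channel; nchan 0-3 selects
   the first controller, 4-7 the second. */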
void DMA_register_channel (int nchan,
                           DMA_transfer_handler transfer_handler,
                           void *opaque)
{
    struct dma_regs *r;
    int ichan, ncont;

    ncont = nchan > 3;
    ichan = nchan & 3;

    r = dma_controllers[ncont].regs + ichan;
    r->transfer_handler = transfer_handler;
    r->opaque = opaque;
}

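/* Copy between guest memory and a device buffer.  The physical address
   combines pageh (bits 24-30), page (bits 16-23) and the channel's
   current address; mode bit 5 selects address-decrement transfers, in
   which case the block below the base address is accessed and the
   buffer reordered. */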
int DMA_read_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    a_target_phys_addr addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        int i;
        uint8_t *p = buf;

        cpu_physical_memory_read (addr - pos - len, buf, len);
        /* What about 16bit transfers? */
        for (i = 0; i < len >> 1; i++) {
            uint8_t b = p[len - i - 1];
            p[i] = b;
        }
    }
    else
        cpu_physical_memory_read (addr + pos, buf, len);

    return len;
}

int DMA_write_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    a_target_phys_addr addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        int i;
        uint8_t *p = buf;

        cpu_physical_memory_write (addr - pos - len, buf, len);
        /* What about 16bit transfers? */
        for (i = 0; i < len; i++) {
            uint8_t b = p[len - i - 1];
            p[i] = b;
        }
    }
    else
        cpu_physical_memory_write (addr + pos, buf, len);

    return len;
}

/* request the emulator to transfer a new DMA memory block ASAP */
void DMA_schedule(int nchan)
{
    CPUState *env = cpu_single_env;
    if (env)
        cpu_exit(env);
}

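/* Reset handler: performs a master reset by writing the controller's
   reset port (0x0d, scaled by dshift). */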
static void dma_reset(void *opaque)
{
    struct dma_cont *d = opaque;
    write_cont (d, (0x0d << d->dshift), 0);
}

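/* Default handler for channels no device has claimed; it only logs the
   stray access. */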
static int dma_phony_handler (void *opaque, int nchan, int dma_pos, int dma_len)
{
    dolog ("unregistered DMA channel used nchan=%d dma_pos=%d dma_len=%d\n",
           nchan, dma_pos, dma_len);
    return dma_pos;
}

/* dshift = 0: 8 bit DMA, 1 = 16 bit DMA.  Channel address/count
   registers live at base + (0..7 << dshift), controller registers at
   base + (8..15 << dshift), plus the page and optional high-page
   ports. */
static void dma_init2(struct dma_cont *d, int base, int dshift,
                      int page_base, int pageh_base)
{
    static const int page_port_list[] = { 0x1, 0x2, 0x3, 0x7 };
    int i;

    d->dshift = dshift;
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + (i << dshift), 1, 1, write_chan, d);
        register_ioport_read (base + (i << dshift), 1, 1, read_chan, d);
    }
    for (i = 0; i < ARRAY_SIZE (page_port_list); i++) {
        register_ioport_write (page_base + page_port_list[i], 1, 1,
                               write_page, d);
        register_ioport_read (page_base + page_port_list[i], 1, 1,
                              read_page, d);
        if (pageh_base >= 0) {
            register_ioport_write (pageh_base + page_port_list[i], 1, 1,
                                   write_pageh, d);
            register_ioport_read (pageh_base + page_port_list[i], 1, 1,
                                  read_pageh, d);
        }
    }
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + ((i + 8) << dshift), 1, 1,
                               write_cont, d);
        register_ioport_read (base + ((i + 8) << dshift), 1, 1,
                              read_cont, d);
    }
    qemu_register_reset(dma_reset, d);
    dma_reset(d);
    for (i = 0; i < ARRAY_SIZE (d->regs); ++i) {
        d->regs[i].transfer_handler = dma_phony_handler;
    }
}

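/* Migration state for a single channel and for a whole controller. */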
static const VMStateDescription vmstate_dma_regs = {
    .name = "dma_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields      = (VMStateField []) {
        VMSTATE_INT32_ARRAY(now, struct dma_regs, 2),
        VMSTATE_UINT16_ARRAY(base, struct dma_regs, 2),
        VMSTATE_UINT8(mode, struct dma_regs),
        VMSTATE_UINT8(page, struct dma_regs),
        VMSTATE_UINT8(pageh, struct dma_regs),
        VMSTATE_UINT8(dack, struct dma_regs),
        VMSTATE_UINT8(eop, struct dma_regs),
        VMSTATE_END_OF_LIST()
    }
};

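/* Restart any transfer that was pending when the state was saved. */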
static int dma_post_load(void *opaque)
{
    DMA_run();

    return 0;
}

static const VMStateDescription vmstate_dma = {
    .name = "dma",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = dma_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT8(command, struct dma_cont),
        VMSTATE_UINT8(mask, struct dma_cont),
        VMSTATE_UINT8(flip_flop, struct dma_cont),
        VMSTATE_INT32(dshift, struct dma_cont),
        VMSTATE_STRUCT_ARRAY(regs, struct dma_cont, 4, 1, vmstate_dma_regs, struct dma_regs),
        VMSTATE_END_OF_LIST()
    }
};

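/* Wire up both controllers: the 8-bit DMA1 at ports 0x00 (pages at
   0x80) and the 16-bit DMA2 at 0xc0 (pages at 0x88), with EISA high
   page registers at 0x480/0x488 when high_page_enable is set. */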
void DMA_init (int high_page_enable)
{
    dma_init2(&dma_controllers[0], 0x00, 0, 0x80,
              high_page_enable ? 0x480 : -1);
    dma_init2(&dma_controllers[1], 0xc0, 1, 0x88,
              high_page_enable ? 0x488 : -1);
    vmstate_register (0, &vmstate_dma, &dma_controllers[0]);
    vmstate_register (1, &vmstate_dma, &dma_controllers[1]);

    dma_bh = qemu_bh_new(DMA_run_bh, NULL);
}