hw/dma.c @ 16f62432
/*
 * QEMU DMA emulation
 *
 * Copyright (c) 2003 Vassili Karpov (malc)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>

#include "cpu.h"
#include "vl.h"

#define log(...) fprintf (stderr, "dma: " __VA_ARGS__)
#ifdef DEBUG_DMA
#define lwarn(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
#else
#define lwarn(...)
#define linfo(...)
#define ldebug(...)
#endif

#define LENOFA(a) ((int) (sizeof(a)/sizeof(a[0])))

struct dma_regs {
    int now[2];
    uint16_t base[2];
    uint8_t mode;
    uint8_t page;
    uint8_t dack;
    uint8_t eop;
    DMA_transfer_handler transfer_handler;
    void *opaque;
};

#define ADDR 0
#define COUNT 1

static struct dma_cont {
    uint8_t status;
    uint8_t command;
    uint8_t mask;
    uint8_t flip_flop;
    struct dma_regs regs[4];
} dma_controllers[2];

enum {
  CMD_MEMORY_TO_MEMORY = 0x01,
  CMD_FIXED_ADDRESS    = 0x02,
  CMD_BLOCK_CONTROLLER = 0x04,
  CMD_COMPRESSED_TIME  = 0x08,
  CMD_CYCLIC_PRIORITY  = 0x10,
  CMD_EXTENDED_WRITE   = 0x20,
  CMD_LOW_DREQ         = 0x40,
  CMD_LOW_DACK         = 0x80,
  CMD_NOT_SUPPORTED    = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
  | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
  | CMD_LOW_DREQ | CMD_LOW_DACK
};

static void write_page (CPUState *env, uint32_t nport, uint32_t data)
{
    int ichan;
    int ncont;
    static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};

    ncont = nport > 0x87;
    ichan = channels[nport - 0x80 - (ncont << 3)];

    if (-1 == ichan) {
        log ("invalid channel %#x %#x\n", nport, data);
        return;
    }

    dma_controllers[ncont].regs[ichan].page = data;
}

static void init_chan (int ncont, int ichan)
{
    struct dma_regs *r;

    r = dma_controllers[ncont].regs + ichan;
    r->now[ADDR] = r->base[0] << ncont;
    r->now[COUNT] = 0;
}

static inline int getff (int ncont)
{
    int ff;

    ff = dma_controllers[ncont].flip_flop;
    dma_controllers[ncont].flip_flop = !ff;
    return ff;
}

static uint32_t read_chan (CPUState *env, uint32_t nport)
{
    int ff;
    int ncont, ichan, nreg;
    struct dma_regs *r;
    int val;

    ncont = nport > 7;
    ichan = (nport >> (1 + ncont)) & 3;
    nreg = (nport >> ncont) & 1;
    r = dma_controllers[ncont].regs + ichan;

    ff = getff (ncont);

    if (nreg)
        val = (r->base[COUNT] << ncont) - r->now[COUNT];
    else
        val = r->now[ADDR] + r->now[COUNT];

    return (val >> (ncont + (ff << 3))) & 0xff;
}

static void write_chan (CPUState *env, uint32_t nport, uint32_t data)
{
    int ncont, ichan, nreg;
    struct dma_regs *r;

    ncont = nport > 7;
    ichan = (nport >> (1 + ncont)) & 3;
    nreg = (nport >> ncont) & 1;
    r = dma_controllers[ncont].regs + ichan;

    if (getff (ncont)) {
        r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
        init_chan (ncont, ichan);
    } else {
        r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
    }
}

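/*
 * Worked example (illustrative): on the first controller, port 0x04 is
 * channel 2's base address register (nport=4 -> ichan=2, nreg=ADDR).  After
 * the guest clears the flip-flop via port 0x0c, writing 0x34 and then 0x12
 * to port 0x04 goes through write_chan/getff above and leaves
 * base[ADDR] = 0x1234; the second (high-byte) write also calls init_chan,
 * which reloads now[ADDR] from the new base value.
 */
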
static void write_cont (CPUState *env, uint32_t nport, uint32_t data)
{
    int iport, ichan, ncont;
    struct dma_cont *d;

    ncont = nport > 0xf;
    ichan = -1;

    d = dma_controllers + ncont;
    if (ncont) {
        iport = ((nport - 0xd0) >> 1) + 8;
    }
    else {
        iport = nport;
    }

    switch (iport) {
    case 8:                     /* command */
        if (data && (data & CMD_NOT_SUPPORTED)) {
            log ("command %#x not supported\n", data);
            goto error;
        }
        d->command = data;
        break;

    case 9:
        ichan = data & 3;
        if (data & 4) {
            d->status |= 1 << (ichan + 4);
        }
        else {
            d->status &= ~(1 << (ichan + 4));
        }
        d->status &= ~(1 << ichan);
        break;

    case 0xa:                   /* single mask */
        if (data & 4)
            d->mask |= 1 << (data & 3);
        else
            d->mask &= ~(1 << (data & 3));
        break;

    case 0xb:                   /* mode */
        {
            ichan = data & 3;
#ifdef DEBUG_DMA
            int op;
            int ai;
            int dir;
            int opmode;

            op = (data >> 2) & 3;
            ai = (data >> 4) & 1;
            dir = (data >> 5) & 1;
            opmode = (data >> 6) & 3;

            linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
                   ichan, op, ai, dir, opmode);
#endif

            d->regs[ichan].mode = data;
            break;
        }

    case 0xc:                   /* clear flip flop */
        d->flip_flop = 0;
        break;

    case 0xd:                   /* reset */
        d->flip_flop = 0;
        d->mask = ~0;
        d->status = 0;
        d->command = 0;
        break;

    case 0xe:                   /* clear mask for all channels */
        d->mask = 0;
        break;

    case 0xf:                   /* write mask for all channels */
        d->mask = data;
        break;

    default:
        log ("unknown iport %#x\n", iport);
        goto error;
    }

#ifdef DEBUG_DMA
    if (0xc != iport) {
        linfo ("nport %#06x, ncont %d, ichan % 2d, val %#06x\n",
               nport, d != dma_controllers, ichan, data);
    }
#endif
    return;

 error:
    abort ();
}

int DMA_get_channel_mode (int nchan)
{
    return dma_controllers[nchan > 3].regs[nchan & 3].mode;
}

void DMA_hold_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("held cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status |= 1 << (ichan + 4);
}

void DMA_release_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("released cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status &= ~(1 << (ichan + 4));
}

/* invoke the registered handler for one channel; the handler returns the
   current position within the transfer */
static void channel_run (int ncont, int ichan)
{
    struct dma_regs *r;
    int n;
    target_ulong addr;
/*     int ai, dir; */

    r = dma_controllers[ncont].regs + ichan;
/*   ai = r->mode & 16; */
/*   dir = r->mode & 32 ? -1 : 1; */

    addr = (r->page << 16) | r->now[ADDR];
    n = r->transfer_handler (r->opaque, addr,
                             (r->base[COUNT] << ncont) + (1 << ncont));
    r->now[COUNT] = n;

    ldebug ("dma_pos %d size %d\n",
            n, (r->base[COUNT] << ncont) + (1 << ncont));
}

/* poll both controllers and run every channel that is unmasked and has its
   DREQ status bit set */
void DMA_run (void)
{
    struct dma_cont *d;
    int icont, ichan;

    d = dma_controllers;

    for (icont = 0; icont < 2; icont++, d++) {
        for (ichan = 0; ichan < 4; ichan++) {
            int mask;

            mask = 1 << ichan;

            if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4))))
                channel_run (icont, ichan);
        }
    }
}

void DMA_register_channel (int nchan,
                           DMA_transfer_handler transfer_handler,
                           void *opaque)
{
    struct dma_regs *r;
    int ichan, ncont;

    ncont = nchan > 3;
    ichan = nchan & 3;

    r = dma_controllers[ncont].regs + ichan;
    r->transfer_handler = transfer_handler;
    r->opaque = opaque;
}

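/*
 * Usage sketch (hypothetical device code, not taken from this file): the
 * handler signature is assumed from the call in channel_run above, i.e. it
 * receives the opaque pointer, the current physical address and the
 * programmed transfer size, and returns the position reached within the
 * transfer.  'MyDevice' and 'my_dma_transfer' are made-up names.
 *
 *   static int my_dma_transfer (void *opaque, target_ulong addr, int size)
 *   {
 *       MyDevice *s = opaque;
 *       // move up to 'size' bytes between the device and guest memory at
 *       // 'addr', then report how far the transfer has progressed
 *       return size;
 *   }
 *
 *   // at device init time, claim ISA DMA channel 2:
 *   DMA_register_channel (2, my_dma_transfer, s);
 *
 *   // when the device wants service, raise DREQ; DMA_run () will then keep
 *   // calling the handler until DMA_release_DREQ (2) is called:
 *   DMA_hold_DREQ (2);
 */
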
/* request the emulator to transfer a new DMA memory block ASAP */
void DMA_schedule(int nchan)
{
    cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
}

void DMA_init (void)
{
    int i;
    int page_port_list[] = { 0x1, 0x2, 0x3, 0x7 };

    for (i = 0; i < 8; i++) {
        register_ioport_write (i, 1, write_chan, 1);

        register_ioport_write (0xc0 + (i << 1), 1, write_chan, 1);

        register_ioport_read (i, 1, read_chan, 1);
        register_ioport_read (0xc0 + (i << 1), 1, read_chan, 1);
    }

    for (i = 0; i < LENOFA (page_port_list); i++) {
        register_ioport_write (page_port_list[i] + 0x80, 1, write_page, 1);
        register_ioport_write (page_port_list[i] + 0x88, 1, write_page, 1);
    }

    for (i = 0; i < 8; i++) {
        register_ioport_write (i + 8, 1, write_cont, 1);
        register_ioport_write (0xd0 + (i << 1), 1, write_cont, 1);
    }

    write_cont (NULL, 0x0d, 0);
    write_cont (NULL, 0xda, 0);
}
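
/*
 * Example guest-side programming sequence for an 8-bit transfer on channel 2
 * of the first controller, as decoded by write_chan/write_page/write_cont
 * above ("port <- value"; the values are illustrative only):
 *
 *   0x0a <- 0x06   mask channel 2
 *   0x0c <- 0x00   clear the flip-flop
 *   0x0b <- 0x46   mode byte for channel 2 (stored verbatim in regs[2].mode)
 *   0x04 <- 0x00   base address, low byte
 *   0x04 <- 0x10   base address, high byte -> now[ADDR] = 0x1000
 *   0x81 <- 0x02   page register for channel 2 -> physical address 0x21000
 *   0x05 <- 0xff   count, low byte
 *   0x05 <- 0x01   count, high byte -> base[COUNT] = 0x1ff (512 bytes)
 *   0x0a <- 0x02   unmask channel 2, so DMA_run can service the channel
 */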