/* hw/dma.c @ f8f48b69 */
/*
 * QEMU DMA emulation
 *
 * Copyright (c) 2003-2004 Vassili Karpov (malc)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "hw.h"
#include "isa.h"
#include "qemu/main-loop.h"

/* #define DEBUG_DMA */

#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__)
#ifdef DEBUG_DMA
#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
#else
#define linfo(...)
#define ldebug(...)
#endif

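/* Per-channel state: programmed base and current address/count, mode byte,
   and the page/pageh registers holding the high address bits. */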
struct dma_regs {
    int now[2];
    uint16_t base[2];
    uint8_t mode;
    uint8_t page;
    uint8_t pageh;
    uint8_t dack;
    uint8_t eop;
    DMA_transfer_handler transfer_handler;
    void *opaque;
};

#define ADDR 0
#define COUNT 1

static struct dma_cont {
    uint8_t status;
    uint8_t command;
    uint8_t mask;
    uint8_t flip_flop;
    int dshift;
    struct dma_regs regs[4];
    qemu_irq *cpu_request_exit;
    MemoryRegion channel_io;
    MemoryRegion cont_io;
} dma_controllers[2];

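/* 8237 command-register bits; everything except controller disable (0x04)
   is rejected in write_cont() below. */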
enum {
    CMD_MEMORY_TO_MEMORY = 0x01,
    CMD_FIXED_ADDRESS    = 0x02,
    CMD_BLOCK_CONTROLLER = 0x04,
    CMD_COMPRESSED_TIME  = 0x08,
    CMD_CYCLIC_PRIORITY  = 0x10,
    CMD_EXTENDED_WRITE   = 0x20,
    CMD_LOW_DREQ         = 0x40,
    CMD_LOW_DACK         = 0x80,
    CMD_NOT_SUPPORTED = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
    | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
    | CMD_LOW_DREQ | CMD_LOW_DACK
};

static void DMA_run (void);

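/* Map page-register port offset (port & 7) to DMA channel; -1 marks ports
   that do not address a channel. */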
static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};

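/* Page registers supply address bits 16-23 (pageh: bits above 16 MB) that
   the 16-bit 8237 cannot generate on its own. */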
static void write_page (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].page = data;
}

static void write_pageh (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].pageh = data;
}

static uint32_t read_page (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].page;
}

static uint32_t read_pageh (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].pageh;
}

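/* Reload the current address from the programmed base and reset transfer
   progress (done whenever a channel is reprogrammed). */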
static inline void init_chan (struct dma_cont *d, int ichan)
{
    struct dma_regs *r;

    r = d->regs + ichan;
    r->now[ADDR] = r->base[ADDR] << d->dshift;
    r->now[COUNT] = 0;
}

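/* Return the byte-select flip-flop and toggle it; it selects the low or
   high byte of the 16-bit address and count registers. */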
static inline int getff (struct dma_cont *d)
{
    int ff;

    ff = d->flip_flop;
    d->flip_flop = !ff;
    return ff;
}

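/* Read one byte of a channel's current address (even ports) or current
   count (odd ports); the flip-flop picks the low or high byte. */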
static uint64_t read_chan(void *opaque, hwaddr nport, unsigned size)
{
    struct dma_cont *d = opaque;
    int ichan, nreg, iport, ff, val, dir;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;

    dir = ((r->mode >> 5) & 1) ? -1 : 1;
    ff = getff (d);
    if (nreg)
        val = (r->base[COUNT] << d->dshift) - r->now[COUNT];
    else
        val = r->now[ADDR] + r->now[COUNT] * dir;

    ldebug ("read_chan %#x -> %d\n", iport, val);
    return (val >> (d->dshift + (ff << 3))) & 0xff;
}

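/* Program one byte of a channel's base address or count; the second byte
   (flip-flop set) completes the 16-bit value and re-latches the channel. */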
static void write_chan(void *opaque, hwaddr nport, uint64_t data,
                       unsigned size)
{
    struct dma_cont *d = opaque;
    int iport, ichan, nreg;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;
    if (getff (d)) {
        r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
        init_chan (d, ichan);
    } else {
        r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
    }
}

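/* Controller registers: command, request, mask, mode, flip-flop clear,
   master reset, and the mask-all ports. */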
static void write_cont(void *opaque, hwaddr nport, uint64_t data,
                       unsigned size)
{
    struct dma_cont *d = opaque;
    int iport, ichan = 0;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x01:                  /* command */
        if ((data != 0) && (data & CMD_NOT_SUPPORTED)) {
            dolog("command %"PRIx64" not supported\n", data);
            return;
        }
        d->command = data;
        break;

    case 0x02:                  /* request */
        ichan = data & 3;
        if (data & 4) {
            d->status |= 1 << (ichan + 4);
        }
        else {
            d->status &= ~(1 << (ichan + 4));
        }
        d->status &= ~(1 << ichan);
        DMA_run();
        break;

    case 0x03:                  /* single mask */
        if (data & 4)
            d->mask |= 1 << (data & 3);
        else
            d->mask &= ~(1 << (data & 3));
        DMA_run();
        break;

    case 0x04:                  /* mode */
        {
            ichan = data & 3;
#ifdef DEBUG_DMA
            {
                int op, ai, dir, opmode;
                op = (data >> 2) & 3;
                ai = (data >> 4) & 1;
                dir = (data >> 5) & 1;
                opmode = (data >> 6) & 3;

                linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
                       ichan, op, ai, dir, opmode);
            }
#endif
            d->regs[ichan].mode = data;
            break;
        }

    case 0x05:                  /* clear flip flop */
        d->flip_flop = 0;
        break;

    case 0x06:                  /* reset */
        d->flip_flop = 0;
        d->mask = ~0;
        d->status = 0;
        d->command = 0;
        break;

    case 0x07:                  /* clear mask for all channels */
        d->mask = 0;
        DMA_run();
        break;

    case 0x08:                  /* write mask for all channels */
        d->mask = data;
        DMA_run();
        break;

    default:
        dolog ("unknown iport %#x\n", iport);
        break;
    }

#ifdef DEBUG_DMA
    if (0xc != iport) {
        linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n",
               nport, ichan, data);
    }
#endif
}

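/* Status and mask reads; reading status clears the terminal-count bits in
   the low nibble. */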
static uint64_t read_cont(void *opaque, hwaddr nport, unsigned size)
{
    struct dma_cont *d = opaque;
    int iport, val;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08:                  /* status */
        val = d->status;
        d->status &= 0xf0;
        break;
    case 0x0f:                  /* mask */
        val = d->mask;
        break;
    default:
        val = 0;
        break;
    }

    ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, iport, val);
    return val;
}

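/* Public helpers: channels 0-3 live on controller 0, channels 4-7 on the
   16-bit cascade controller 1. */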
int DMA_get_channel_mode (int nchan)
{
    return dma_controllers[nchan > 3].regs[nchan & 3].mode;
}

void DMA_hold_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("held cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status |= 1 << (ichan + 4);
    DMA_run();
}

void DMA_release_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("released cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status &= ~(1 << (ichan + 4));
    DMA_run();
}

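/* Invoke the channel's transfer handler with the current position and the
   total transfer size; the handler returns the new position. */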
static void channel_run (int ncont, int ichan)
{
    int n;
    struct dma_regs *r = &dma_controllers[ncont].regs[ichan];
#ifdef DEBUG_DMA
    int dir, opmode;

    dir = (r->mode >> 5) & 1;
    opmode = (r->mode >> 6) & 3;

    if (dir) {
        dolog ("DMA in address decrement mode\n");
    }
    if (opmode != 1) {
        dolog ("DMA not in single mode select %#x\n", opmode);
    }
#endif

    n = r->transfer_handler (r->opaque, ichan + (ncont << 2),
                             r->now[COUNT], (r->base[COUNT] + 1) << ncont);
    r->now[COUNT] = n;
    ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
}

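/* Run every unmasked channel with a pending DREQ; a re-entrant call simply
   requests another pass via the idle bottom half. */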
static QEMUBH *dma_bh;

static void DMA_run (void)
{
    struct dma_cont *d;
    int icont, ichan;
    int rearm = 0;
    static int running = 0;

    if (running) {
        rearm = 1;
        goto out;
    } else {
        running = 1;
    }

    d = dma_controllers;

    for (icont = 0; icont < 2; icont++, d++) {
        for (ichan = 0; ichan < 4; ichan++) {
            int mask;

            mask = 1 << ichan;

            if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4)))) {
                channel_run (icont, ichan);
                rearm = 1;
            }
        }
    }

    running = 0;
 out:
    if (rearm)
        qemu_bh_schedule_idle(dma_bh);
}

static void DMA_run_bh(void *unused)
{
    DMA_run();
}

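/* Install a device's transfer handler on a channel. */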
void DMA_register_channel (int nchan,
                           DMA_transfer_handler transfer_handler,
                           void *opaque)
{
    struct dma_regs *r;
    int ichan, ncont;

    ncont = nchan > 3;
    ichan = nchan & 3;

    r = dma_controllers[ncont].regs + ichan;
    r->transfer_handler = transfer_handler;
    r->opaque = opaque;
}

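/* Copy guest memory into buf for the device; the physical address is
   pageh:page:current-address, and in address-decrement mode the bytes are
   delivered in transfer (descending-address) order. */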
int DMA_read_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    hwaddr addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        int i;
        uint8_t *p = buf;

        cpu_physical_memory_read (addr - pos - len, buf, len);
        /* What about 16bit transfers? */
        for (i = 0; i < len >> 1; i++) {
            /* swap both ends so buf is a true reversal of guest memory */
            uint8_t b = p[len - i - 1];
            p[len - i - 1] = p[i];
            p[i] = b;
        }
    }
    else
        cpu_physical_memory_read (addr + pos, buf, len);

    return len;
}

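/* Store buf into guest memory for the device; in address-decrement mode the
   buffer is reversed before it is stored. */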
int DMA_write_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    hwaddr addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        int i;
        uint8_t *p = buf;

        /* What about 16bit transfers? */
        for (i = 0; i < len >> 1; i++) {
            /* reverse buf first so memory ends up in descending order */
            uint8_t b = p[len - i - 1];
            p[len - i - 1] = p[i];
            p[i] = b;
        }
        cpu_physical_memory_write (addr - pos - len, buf, len);
    }
    else
        cpu_physical_memory_write (addr + pos, buf, len);

    return len;
}

/* request the emulator to transfer a new DMA memory block ASAP */
void DMA_schedule(int nchan)
{
    struct dma_cont *d = &dma_controllers[nchan > 3];

    qemu_irq_pulse(*d->cpu_request_exit);
}

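/* On system reset, emulate a master-reset write to the controller. */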
static void dma_reset(void *opaque)
{
    struct dma_cont *d = opaque;
    write_cont(d, (0x06 << d->dshift), 0, 1);
}

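/* Fallback handler for channels no device has claimed. */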
static int dma_phony_handler (int opaque, int nchan, int dma_pos, int dma_len)
{
    dolog ("unregistered DMA channel used nchan=%d dma_pos=%d dma_len=%d\n",
           nchan, dma_pos, dma_len);
    return dma_pos;
}

static const MemoryRegionOps channel_io_ops = {
    .read = read_chan,
    .write = write_chan,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 1,
    },
};

/* IOport from page_base */
static const MemoryRegionPortio page_portio_list[] = {
    { 0x01, 3, 1, .write = write_page, .read = read_page, },
    { 0x07, 1, 1, .write = write_page, .read = read_page, },
    PORTIO_END_OF_LIST(),
};

/* IOport from pageh_base */
static const MemoryRegionPortio pageh_portio_list[] = {
    { 0x01, 3, 1, .write = write_pageh, .read = read_pageh, },
    { 0x07, 3, 1, .write = write_pageh, .read = read_pageh, },
    PORTIO_END_OF_LIST(),
};

static const MemoryRegionOps cont_io_ops = {
    .read = read_cont,
    .write = write_cont,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 1,
    },
};

/* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */
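/* Map one controller's channel, page, and control I/O regions, register the
   reset handler, and park every channel on the phony handler. */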
static void dma_init2(struct dma_cont *d, int base, int dshift,
                      int page_base, int pageh_base,
                      qemu_irq *cpu_request_exit)
{
    int i;

    d->dshift = dshift;
    d->cpu_request_exit = cpu_request_exit;

    memory_region_init_io(&d->channel_io, &channel_io_ops, d,
                          "dma-chan", 8 << d->dshift);
    memory_region_add_subregion(isa_address_space_io(NULL),
                                base, &d->channel_io);

    isa_register_portio_list(NULL, page_base, page_portio_list, d,
                             "dma-page");
    if (pageh_base >= 0) {
        isa_register_portio_list(NULL, pageh_base, pageh_portio_list, d,
                                 "dma-pageh");
    }

    memory_region_init_io(&d->cont_io, &cont_io_ops, d, "dma-cont",
                          8 << d->dshift);
    memory_region_add_subregion(isa_address_space_io(NULL),
                                base + (8 << d->dshift), &d->cont_io);

    qemu_register_reset(dma_reset, d);
    dma_reset(d);
    for (i = 0; i < ARRAY_SIZE (d->regs); ++i) {
        d->regs[i].transfer_handler = dma_phony_handler;
    }
}

static const VMStateDescription vmstate_dma_regs = {
    .name = "dma_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_INT32_ARRAY(now, struct dma_regs, 2),
        VMSTATE_UINT16_ARRAY(base, struct dma_regs, 2),
        VMSTATE_UINT8(mode, struct dma_regs),
        VMSTATE_UINT8(page, struct dma_regs),
        VMSTATE_UINT8(pageh, struct dma_regs),
        VMSTATE_UINT8(dack, struct dma_regs),
        VMSTATE_UINT8(eop, struct dma_regs),
        VMSTATE_END_OF_LIST()
    }
};

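/* Kick transfers back into motion once migration has restored the DREQ and
   mask state. */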
static int dma_post_load(void *opaque, int version_id)
{
    DMA_run();

    return 0;
}

static const VMStateDescription vmstate_dma = {
    .name = "dma",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = dma_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT8(command, struct dma_cont),
        VMSTATE_UINT8(mask, struct dma_cont),
        VMSTATE_UINT8(flip_flop, struct dma_cont),
        VMSTATE_INT32(dshift, struct dma_cont),
        VMSTATE_STRUCT_ARRAY(regs, struct dma_cont, 4, 1, vmstate_dma_regs,
                             struct dma_regs),
        VMSTATE_END_OF_LIST()
    }
};

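/* Wire up both controllers: 8-bit at ports 0x00/0x80, the 16-bit cascade at
   0xc0/0x88, plus the optional high page registers at 0x480/0x488. */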
void DMA_init(int high_page_enable, qemu_irq *cpu_request_exit)
{
    dma_init2(&dma_controllers[0], 0x00, 0, 0x80,
              high_page_enable ? 0x480 : -1, cpu_request_exit);
    dma_init2(&dma_controllers[1], 0xc0, 1, 0x88,
              high_page_enable ? 0x488 : -1, cpu_request_exit);
    vmstate_register (NULL, 0, &vmstate_dma, &dma_controllers[0]);
    vmstate_register (NULL, 1, &vmstate_dma, &dma_controllers[1]);

    dma_bh = qemu_bh_new(DMA_run_bh, NULL);
}