root / hw / cuda.c @ 84ede329
History | View | Annotate | Download (20.3 kB)
1 |
/*
|
---|---|
2 |
* QEMU PowerMac CUDA device support
|
3 |
*
|
4 |
* Copyright (c) 2004-2007 Fabrice Bellard
|
5 |
* Copyright (c) 2007 Jocelyn Mayer
|
6 |
*
|
7 |
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
8 |
* of this software and associated documentation files (the "Software"), to deal
|
9 |
* in the Software without restriction, including without limitation the rights
|
10 |
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
11 |
* copies of the Software, and to permit persons to whom the Software is
|
12 |
* furnished to do so, subject to the following conditions:
|
13 |
*
|
14 |
* The above copyright notice and this permission notice shall be included in
|
15 |
* all copies or substantial portions of the Software.
|
16 |
*
|
17 |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
18 |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
19 |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
20 |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
21 |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
22 |
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
23 |
* THE SOFTWARE.
|
24 |
*/
|
25 |
#include "hw.h" |
26 |
#include "ppc/mac.h" |
27 |
#include "adb.h" |
28 |
#include "qemu/timer.h" |
29 |
#include "sysemu/sysemu.h" |
30 |
|
31 |
/* XXX: implement all timer modes */

/* debug CUDA */
//#define DEBUG_CUDA

/* debug CUDA packets */
//#define DEBUG_CUDA_PACKET

#ifdef DEBUG_CUDA
#define CUDA_DPRINTF(fmt, ...)                                  \
    do { printf("CUDA: " fmt , ## __VA_ARGS__); } while (0)
#else
#define CUDA_DPRINTF(fmt, ...)
#endif

/* Bits in B data register: all active low */
#define TREQ            0x08    /* Transfer request (input) */
#define TACK            0x10    /* Transfer acknowledge (output) */
#define TIP             0x20    /* Transfer in progress (output) */

/* Bits in ACR */
#define SR_CTRL         0x1c    /* Shift register control bits */
#define SR_EXT          0x0c    /* Shift on external clock */
#define SR_OUT          0x10    /* Shift out if 1 */

/* Bits in IFR and IER */
#define IER_SET         0x80    /* set bits in IER */
#define IER_CLR         0       /* clear bits in IER */
#define SR_INT          0x04    /* Shift register full/empty */
#define T1_INT          0x40    /* Timer 1 interrupt */
#define T2_INT          0x20    /* Timer 2 interrupt */

/* Bits in ACR */
#define T1MODE          0xc0    /* Timer 1 mode */
#define T1MODE_CONT     0x40    /* continuous interrupts */

/* commands (1st byte) */
#define ADB_PACKET      0
#define CUDA_PACKET     1
#define ERROR_PACKET    2
#define TIMER_PACKET    3
#define POWER_PACKET    4
#define MACIIC_PACKET   5
#define PMU_PACKET      6

/* CUDA commands (2nd byte) */
#define CUDA_WARM_START                 0x0
#define CUDA_AUTOPOLL                   0x1
#define CUDA_GET_6805_ADDR              0x2
#define CUDA_GET_TIME                   0x3
#define CUDA_GET_PRAM                   0x7
#define CUDA_SET_6805_ADDR              0x8
#define CUDA_SET_TIME                   0x9
#define CUDA_POWERDOWN                  0xa
#define CUDA_POWERUP_TIME               0xb
#define CUDA_SET_PRAM                   0xc
#define CUDA_MS_RESET                   0xd
#define CUDA_SEND_DFAC                  0xe
#define CUDA_BATTERY_SWAP_SENSE         0x10
#define CUDA_RESET_SYSTEM               0x11
#define CUDA_SET_IPL                    0x12
#define CUDA_FILE_SERVER_FLAG           0x13
#define CUDA_SET_AUTO_RATE              0x14
#define CUDA_GET_AUTO_RATE              0x16
#define CUDA_SET_DEVICE_LIST            0x19
#define CUDA_GET_DEVICE_LIST            0x1a
#define CUDA_SET_ONE_SECOND_MODE        0x1b
#define CUDA_SET_POWER_MESSAGES         0x21
#define CUDA_GET_SET_IIC                0x22
#define CUDA_WAKEUP                     0x23
#define CUDA_TIMER_TICKLE               0x24
#define CUDA_COMBINED_FORMAT_IIC        0x25

/* VIA timer input clock, in Hz */
#define CUDA_TIMER_FREQ (4700000 / 6)
/* ADB autopoll rate, polls per second */
#define CUDA_ADB_POLL_FREQ 50

/* CUDA returns time_t's offset from Jan 1, 1904, not 1970 */
#define RTC_OFFSET                      2082844800

/* File-scope ADB bus shared by the poll timer and host packet path
   (single-instance device; see cuda_initfn). */
ADBBusState adb_bus;

static void cuda_update(CUDAState *s);
static void cuda_receive_packet_from_host(CUDAState *s,
                                          const uint8_t *data, int len);
static void cuda_timer_update(CUDAState *s, CUDATimer *ti,
                              int64_t current_time);
118 |
|
119 |
/* Recompute the CPU interrupt line from the IFR/IER pair.  Only the
 * shift-register and timer-1 sources participate in the IRQ decision. */
static void cuda_update_irq(CUDAState *s)
{
    int active = (s->ifr & s->ier & (SR_INT | T1_INT)) != 0;

    if (active) {
        qemu_irq_raise(s->irq);
    } else {
        qemu_irq_lower(s->irq);
    }
}
127 |
|
128 |
/* Return the current 16-bit value of a VIA timer counter, derived from
 * the virtual-clock time elapsed since the counter was last loaded.
 * Timer 1 (index 0) reloads from its latch; the other timer counts
 * down without reloading. */
static unsigned int get_counter(CUDATimer *s)
{
    int64_t d;
    unsigned int counter;

    /* elapsed time since load, converted to timer ticks */
    d = muldiv64(qemu_get_clock_ns(vm_clock) - s->load_time,
                 CUDA_TIMER_FREQ, get_ticks_per_sec());
    if (s->index == 0) {
        /* the timer goes down from latch to -1 (period of latch + 2) */
        if (d <= (s->counter_value + 1)) {
            /* still within the first countdown from the loaded value */
            counter = (s->counter_value - d) & 0xffff;
        } else {
            /* past the first underflow: position within reload periods */
            counter = (d - (s->counter_value + 1)) % (s->latch + 2);
            counter = (s->latch - counter) & 0xffff;
        }
    } else {
        /* timer 2: free-running countdown, no latch reload */
        counter = (s->counter_value - d) & 0xffff;
    }
    return counter;
}
148 |
|
149 |
/* Load a timer counter with a new value and reschedule its interrupt. */
static void set_counter(CUDAState *s, CUDATimer *ti, unsigned int val)
{
    int64_t now = qemu_get_clock_ns(vm_clock);

    CUDA_DPRINTF("T%d.counter=%d\n", 1 + (ti->timer == NULL), val);
    ti->load_time = now;
    ti->counter_value = val;
    cuda_timer_update(s, ti, now);
}
156 |
|
157 |
/* Compute the absolute virtual-clock deadline of timer 1's next
 * underflow interrupt.  Mirrors the counter arithmetic in
 * get_counter(); the interrupt is modeled as firing when the counter
 * reaches 0. */
static int64_t get_next_irq_time(CUDATimer *s, int64_t current_time)
{
    int64_t d, next_time;
    unsigned int counter;

    /* current counter value */
    d = muldiv64(current_time - s->load_time,
                 CUDA_TIMER_FREQ, get_ticks_per_sec());
    /* the timer goes down from latch to -1 (period of latch + 2) */
    if (d <= (s->counter_value + 1)) {
        counter = (s->counter_value - d) & 0xffff;
    } else {
        counter = (d - (s->counter_value + 1)) % (s->latch + 2);
        counter = (s->latch - counter) & 0xffff;
    }

    /* Note: we consider the irq is raised on 0 */
    if (counter == 0xffff) {
        /* just underflowed: next zero is one full reload away */
        next_time = d + s->latch + 1;
    } else if (counter == 0) {
        /* exactly at zero: schedule the following period */
        next_time = d + s->latch + 2;
    } else {
        next_time = d + counter;
    }
    CUDA_DPRINTF("latch=%d counter=%" PRId64 " delta_next=%" PRId64 "\n",
                 s->latch, d, next_time - d);
    /* convert the tick deadline back to an absolute nanosecond time */
    next_time = muldiv64(next_time, get_ticks_per_sec(), CUDA_TIMER_FREQ) +
        s->load_time;
    /* never schedule in the past */
    if (next_time <= current_time)
        next_time = current_time + 1;
    return next_time;
}
189 |
|
190 |
static void cuda_timer_update(CUDAState *s, CUDATimer *ti, |
191 |
int64_t current_time) |
192 |
{ |
193 |
if (!ti->timer)
|
194 |
return;
|
195 |
if ((s->acr & T1MODE) != T1MODE_CONT) {
|
196 |
qemu_del_timer(ti->timer); |
197 |
} else {
|
198 |
ti->next_irq_time = get_next_irq_time(ti, current_time); |
199 |
qemu_mod_timer(ti->timer, ti->next_irq_time); |
200 |
} |
201 |
} |
202 |
|
203 |
static void cuda_timer1(void *opaque) |
204 |
{ |
205 |
CUDAState *s = opaque; |
206 |
CUDATimer *ti = &s->timers[0];
|
207 |
|
208 |
cuda_timer_update(s, ti, ti->next_irq_time); |
209 |
s->ifr |= T1_INT; |
210 |
cuda_update_irq(s); |
211 |
} |
212 |
|
213 |
/* Byte-wide MMIO read of a VIA register.  Registers are spaced 0x200
 * apart in the MMIO window (hence the >> 9).  Several reads have side
 * effects on the interrupt flags, matching 6522-style semantics. */
static uint32_t cuda_readb(void *opaque, hwaddr addr)
{
    CUDAState *s = opaque;
    uint32_t val;

    addr = (addr >> 9) & 0xf;
    switch(addr) {
    case 0:
        /* port B data */
        val = s->b;
        break;
    case 1:
        /* port A data */
        val = s->a;
        break;
    case 2:
        /* port B direction */
        val = s->dirb;
        break;
    case 3:
        /* port A direction */
        val = s->dira;
        break;
    case 4:
        /* T1 counter low: reading acknowledges the T1 interrupt */
        val = get_counter(&s->timers[0]) & 0xff;
        s->ifr &= ~T1_INT;
        cuda_update_irq(s);
        break;
    case 5:
        /* T1 counter high (no flag is cleared here, only the IRQ line
           is recomputed) */
        val = get_counter(&s->timers[0]) >> 8;
        cuda_update_irq(s);
        break;
    case 6:
        /* T1 latch low */
        val = s->timers[0].latch & 0xff;
        break;
    case 7:
        /* XXX: check this */
        val = (s->timers[0].latch >> 8) & 0xff;
        break;
    case 8:
        /* T2 counter low: clears T2 flag (note: no cuda_update_irq here;
           T2_INT is not part of the IRQ mask in cuda_update_irq anyway) */
        val = get_counter(&s->timers[1]) & 0xff;
        s->ifr &= ~T2_INT;
        break;
    case 9:
        /* T2 counter high */
        val = get_counter(&s->timers[1]) >> 8;
        break;
    case 10:
        /* shift register: reading acknowledges the SR interrupt */
        val = s->sr;
        s->ifr &= ~SR_INT;
        cuda_update_irq(s);
        break;
    case 11:
        val = s->acr;
        break;
    case 12:
        val = s->pcr;
        break;
    case 13:
        /* IFR: bit 7 mirrors "any enabled interrupt pending" */
        val = s->ifr;
        if (s->ifr & s->ier)
            val |= 0x80;
        break;
    case 14:
        /* IER always reads with bit 7 set */
        val = s->ier | 0x80;
        break;
    default:
    case 15:
        val = s->anh;
        break;
    }
    /* suppress trace noise from idle IFR polling */
    if (addr != 13 || val != 0) {
        CUDA_DPRINTF("read: reg=0x%x val=%02x\n", (int)addr, val);
    }

    return val;
}
285 |
|
286 |
/* Byte-wide MMIO write of a VIA register.  Port-B and ACR writes drive
 * the host/CUDA handshake via cuda_update(); timer-register writes
 * reload counters and reschedule interrupts per 6522-style rules. */
static void cuda_writeb(void *opaque, hwaddr addr, uint32_t val)
{
    CUDAState *s = opaque;

    addr = (addr >> 9) & 0xf;
    CUDA_DPRINTF("write: reg=0x%x val=%02x\n", (int)addr, val);

    switch(addr) {
    case 0:
        /* port B: handshake lines (TIP/TACK) changed, run the protocol */
        s->b = val;
        cuda_update(s);
        break;
    case 1:
        s->a = val;
        break;
    case 2:
        s->dirb = val;
        break;
    case 3:
        s->dira = val;
        break;
    case 4:
        /* T1 counter low: only the latch low byte is updated */
        s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
        cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
        break;
    case 5:
        /* T1 counter high: latch high byte set, counter loaded from
           latch, and the T1 interrupt flag is cleared */
        s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
        s->ifr &= ~T1_INT;
        set_counter(s, &s->timers[0], s->timers[0].latch);
        break;
    case 6:
        /* T1 latch low (same effect as case 4) */
        s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
        cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
        break;
    case 7:
        /* T1 latch high: clears the T1 flag but does NOT reload the
           counter (unlike case 5) */
        s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
        s->ifr &= ~T1_INT;
        cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
        break;
    case 8:
        /* T2 counter low */
        s->timers[1].latch = val;
        set_counter(s, &s->timers[1], val);
        break;
    case 9:
        /* T2 counter high: combines with the stored low byte */
        set_counter(s, &s->timers[1], (val << 8) | s->timers[1].latch);
        break;
    case 10:
        /* shift register: byte the guest wants to send to CUDA */
        s->sr = val;
        break;
    case 11:
        /* ACR: may switch T1 mode and SR direction, so refresh both */
        s->acr = val;
        cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
        cuda_update(s);
        break;
    case 12:
        s->pcr = val;
        break;
    case 13:
        /* reset bits */
        s->ifr &= ~val;
        cuda_update_irq(s);
        break;
    case 14:
        if (val & IER_SET) {
            /* set bits */
            s->ier |= val & 0x7f;
        } else {
            /* reset bits */
            s->ier &= ~val;
        }
        cuda_update_irq(s);
        break;
    default:
    case 15:
        s->anh = val;
        break;
    }
}
364 |
|
365 |
/* NOTE: TIP and TREQ are negated */
/* Core handshake state machine between the guest (via port B bits
 * TIP/TACK) and the emulated CUDA.  Moves one byte per TACK edge
 * through the shift register, raises SR_INT after each byte, and when
 * the guest ends an output transfer, hands the accumulated buffer to
 * cuda_receive_packet_from_host(). */
static void cuda_update(CUDAState *s)
{
    int packet_received, len;

    packet_received = 0;
    if (!(s->b & TIP)) {
        /* transfer requested from host */

        if (s->acr & SR_OUT) {
            /* data output */
            /* a TACK/TIP edge clocks one byte out of the guest */
            if ((s->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
                if (s->data_out_index < sizeof(s->data_out)) {
                    CUDA_DPRINTF("send: %02x\n", s->sr);
                    s->data_out[s->data_out_index++] = s->sr;
                    s->ifr |= SR_INT;
                    cuda_update_irq(s);
                }
            }
        } else {
            if (s->data_in_index < s->data_in_size) {
                /* data input */
                if ((s->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
                    s->sr = s->data_in[s->data_in_index++];
                    CUDA_DPRINTF("recv: %02x\n", s->sr);
                    /* indicate end of transfer */
                    if (s->data_in_index >= s->data_in_size) {
                        s->b = (s->b | TREQ);
                    }
                    s->ifr |= SR_INT;
                    cuda_update_irq(s);
                }
            }
        }
    } else {
        /* no transfer requested: handle sync case */
        if ((s->last_b & TIP) && (s->b & TACK) != (s->last_b & TACK)) {
            /* update TREQ state each time TACK change state */
            if (s->b & TACK)
                s->b = (s->b | TREQ);
            else
                s->b = (s->b & ~TREQ);
            s->ifr |= SR_INT;
            cuda_update_irq(s);
        } else {
            if (!(s->last_b & TIP)) {
                /* handle end of host to cuda transfer */
                packet_received = (s->data_out_index > 0);
                /* always an IRQ at the end of transfer */
                s->ifr |= SR_INT;
                cuda_update_irq(s);
            }
            /* signal if there is data to read */
            if (s->data_in_index < s->data_in_size) {
                s->b = (s->b & ~TREQ);
            }
        }
    }

    /* remember handshake state to detect edges on the next call */
    s->last_acr = s->acr;
    s->last_b = s->b;

    /* NOTE: cuda_receive_packet_from_host() can call cuda_update()
       recursively */
    if (packet_received) {
        len = s->data_out_index;
        s->data_out_index = 0;
        cuda_receive_packet_from_host(s, s->data_out, len);
    }
}
435 |
|
436 |
/* Queue a response packet for the guest to read through the shift
 * register, then kick the handshake and raise the SR interrupt.
 *
 * Fix: the original memcpy() had no bound, so a reply longer than the
 * input FIFO would overflow s->data_in.  Clamp the length (truncating
 * the tail of an oversized packet) rather than corrupting memory —
 * mirrors the sizeof(s->data_out) guard used in cuda_update(). */
static void cuda_send_packet_to_host(CUDAState *s,
                                     const uint8_t *data, int len)
{
#ifdef DEBUG_CUDA_PACKET
    {
        int i;
        printf("cuda_send_packet_to_host:\n");
        for(i = 0; i < len; i++)
            printf(" %02x", data[i]);
        printf("\n");
    }
#endif
    /* defensive bound: never write past the input FIFO */
    if (len > (int)sizeof(s->data_in)) {
        len = sizeof(s->data_in);
    }
    memcpy(s->data_in, data, len);
    s->data_in_size = len;
    s->data_in_index = 0;
    cuda_update(s);
    s->ifr |= SR_INT;
    cuda_update_irq(s);
}
455 |
|
456 |
static void cuda_adb_poll(void *opaque) |
457 |
{ |
458 |
CUDAState *s = opaque; |
459 |
uint8_t obuf[ADB_MAX_OUT_LEN + 2];
|
460 |
int olen;
|
461 |
|
462 |
olen = adb_poll(&adb_bus, obuf + 2);
|
463 |
if (olen > 0) { |
464 |
obuf[0] = ADB_PACKET;
|
465 |
obuf[1] = 0x40; /* polled data */ |
466 |
cuda_send_packet_to_host(s, obuf, olen + 2);
|
467 |
} |
468 |
qemu_mod_timer(s->adb_poll_timer, |
469 |
qemu_get_clock_ns(vm_clock) + |
470 |
(get_ticks_per_sec() / CUDA_ADB_POLL_FREQ)); |
471 |
} |
472 |
|
473 |
/* Handle a CUDA_PACKET command from the guest; data[0] is the CUDA
 * command byte, data[1..] its arguments.  Every handled command sends
 * back a CUDA_PACKET reply via cuda_send_packet_to_host(). */
static void cuda_receive_packet(CUDAState *s,
                                const uint8_t *data, int len)
{
    uint8_t obuf[16];
    int autopoll;
    uint32_t ti;

    switch(data[0]) {
    case CUDA_AUTOPOLL:
        /* enable/disable the periodic ADB autopoll timer */
        autopoll = (data[1] != 0);
        if (autopoll != s->autopoll) {
            s->autopoll = autopoll;
            if (autopoll) {
                qemu_mod_timer(s->adb_poll_timer,
                               qemu_get_clock_ns(vm_clock) +
                               (get_ticks_per_sec() / CUDA_ADB_POLL_FREQ));
            } else {
                qemu_del_timer(s->adb_poll_timer);
            }
        }
        obuf[0] = CUDA_PACKET;
        obuf[1] = data[1];
        cuda_send_packet_to_host(s, obuf, 2);
        break;
    case CUDA_SET_TIME:
        /* big-endian 32-bit seconds since 1904; store as offset from
           the current virtual clock (ns / ticks-per-sec = seconds) */
        ti = (((uint32_t)data[1]) << 24) + (((uint32_t)data[2]) << 16) + (((uint32_t)data[3]) << 8) + data[4];
        s->tick_offset = ti - (qemu_get_clock_ns(vm_clock) / get_ticks_per_sec());
        obuf[0] = CUDA_PACKET;
        obuf[1] = 0;
        obuf[2] = 0;
        cuda_send_packet_to_host(s, obuf, 3);
        break;
    case CUDA_GET_TIME:
        /* reply with the stored offset plus elapsed virtual seconds,
           big-endian in bytes 3..6 */
        ti = s->tick_offset + (qemu_get_clock_ns(vm_clock) / get_ticks_per_sec());
        obuf[0] = CUDA_PACKET;
        obuf[1] = 0;
        obuf[2] = 0;
        obuf[3] = ti >> 24;
        obuf[4] = ti >> 16;
        obuf[5] = ti >> 8;
        obuf[6] = ti;
        cuda_send_packet_to_host(s, obuf, 7);
        break;
    case CUDA_FILE_SERVER_FLAG:
    case CUDA_SET_DEVICE_LIST:
    case CUDA_SET_AUTO_RATE:
    case CUDA_SET_POWER_MESSAGES:
        /* accepted but otherwise ignored: acknowledge with success */
        obuf[0] = CUDA_PACKET;
        obuf[1] = 0;
        cuda_send_packet_to_host(s, obuf, 2);
        break;
    case CUDA_POWERDOWN:
        obuf[0] = CUDA_PACKET;
        obuf[1] = 0;
        cuda_send_packet_to_host(s, obuf, 2);
        qemu_system_shutdown_request();
        break;
    case CUDA_RESET_SYSTEM:
        obuf[0] = CUDA_PACKET;
        obuf[1] = 0;
        cuda_send_packet_to_host(s, obuf, 2);
        qemu_system_reset_request();
        break;
    default:
        /* NOTE(review): unrecognized commands are silently dropped with
           no reply — a guest waiting for an answer may stall; confirm
           against the CUDA protocol whether an ERROR_PACKET reply is
           expected here */
        break;
    }
}
540 |
|
541 |
/* Dispatch a complete packet written by the guest: ADB packets are
 * forwarded to the ADB bus (negative adb_request() results are
 * reported back as an error status byte), CUDA packets go to
 * cuda_receive_packet().  Other packet types are ignored. */
static void cuda_receive_packet_from_host(CUDAState *s,
                                          const uint8_t *data, int len)
{
#ifdef DEBUG_CUDA_PACKET
    {
        int i;
        printf("cuda_receive_packet_from_host:\n");
        for(i = 0; i < len; i++)
            printf(" %02x", data[i]);
        printf("\n");
    }
#endif
    switch(data[0]) {
    case ADB_PACKET:
        {
            uint8_t obuf[ADB_MAX_OUT_LEN + 2];
            int olen;
            olen = adb_request(&adb_bus, obuf + 2, data + 1, len - 1);
            if (olen > 0) {
                obuf[0] = ADB_PACKET;
                obuf[1] = 0x00;
            } else {
                /* error */
                obuf[0] = ADB_PACKET;
                obuf[1] = -olen;
                olen = 0;
            }
            cuda_send_packet_to_host(s, obuf, olen + 2);
        }
        break;
    case CUDA_PACKET:
        cuda_receive_packet(s, data + 1, len - 1);
        break;
    }
}
576 |
|
577 |
/* 16-bit MMIO writes are not implemented; silently ignored. */
static void cuda_writew (void *opaque, hwaddr addr, uint32_t value)
{
}
580 |
|
581 |
/* 32-bit MMIO writes are not implemented; silently ignored. */
static void cuda_writel (void *opaque, hwaddr addr, uint32_t value)
{
}
584 |
|
585 |
/* 16-bit MMIO reads are not implemented; always return 0. */
static uint32_t cuda_readw (void *opaque, hwaddr addr)
{
    return 0;
}
589 |
|
590 |
/* 32-bit MMIO reads are not implemented; always return 0. */
static uint32_t cuda_readl (void *opaque, hwaddr addr)
{
    return 0;
}
594 |
|
595 |
/* MMIO dispatch table.  Only byte accesses do real work
   (cuda_readb/cuda_writeb); 16- and 32-bit accesses are stubs. */
static const MemoryRegionOps cuda_ops = {
    .old_mmio = {
        .write = {
            cuda_writeb,
            cuda_writew,
            cuda_writel,
        },
        .read = {
            cuda_readb,
            cuda_readw,
            cuda_readl,
        },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};
610 |
|
611 |
static bool cuda_timer_exist(void *opaque, int version_id) |
612 |
{ |
613 |
CUDATimer *s = opaque; |
614 |
|
615 |
return s->timer != NULL; |
616 |
} |
617 |
|
618 |
/* Migration description of one VIA timer.  The QEMU timer itself is
   only saved/restored when it exists (cuda_timer_exist). */
static const VMStateDescription vmstate_cuda_timer = {
    .name = "cuda_timer",
    .version_id = 0,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(latch, CUDATimer),
        VMSTATE_UINT16(counter_value, CUDATimer),
        VMSTATE_INT64(load_time, CUDATimer),
        VMSTATE_INT64(next_irq_time, CUDATimer),
        VMSTATE_TIMER_TEST(timer, CUDATimer, cuda_timer_exist),
        VMSTATE_END_OF_LIST()
    }
};
632 |
|
633 |
/* Migration description of the whole CUDA device: VIA registers,
   transfer FIFOs and indices, autopoll flag, RTC offset, and both
   timers (as an embedded struct array). */
static const VMStateDescription vmstate_cuda = {
    .name = "cuda",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(a, CUDAState),
        VMSTATE_UINT8(b, CUDAState),
        VMSTATE_UINT8(dira, CUDAState),
        VMSTATE_UINT8(dirb, CUDAState),
        VMSTATE_UINT8(sr, CUDAState),
        VMSTATE_UINT8(acr, CUDAState),
        VMSTATE_UINT8(pcr, CUDAState),
        VMSTATE_UINT8(ifr, CUDAState),
        VMSTATE_UINT8(ier, CUDAState),
        VMSTATE_UINT8(anh, CUDAState),
        VMSTATE_INT32(data_in_size, CUDAState),
        VMSTATE_INT32(data_in_index, CUDAState),
        VMSTATE_INT32(data_out_index, CUDAState),
        VMSTATE_UINT8(autopoll, CUDAState),
        VMSTATE_BUFFER(data_in, CUDAState),
        VMSTATE_BUFFER(data_out, CUDAState),
        VMSTATE_UINT32(tick_offset, CUDAState),
        VMSTATE_STRUCT_ARRAY(timers, CUDAState, 2, 1,
                             vmstate_cuda_timer, CUDATimer),
        VMSTATE_END_OF_LIST()
    }
};
661 |
|
662 |
/* Device reset: return every VIA register and FIFO to power-on state
 * and reload both timers. */
static void cuda_reset(DeviceState *dev)
{
    CUDAState *s = CUDA(dev);

    /* clear the register file */
    s->a = 0;
    s->b = 0;
    s->dira = 0;
    s->dirb = 0;
    s->sr = 0;
    s->acr = 0;
    s->pcr = 0;
    s->ifr = 0;
    s->ier = 0;
    // s->ier = T1_INT | SR_INT;
    s->anh = 0;

    /* drain both transfer FIFOs and disable autopolling */
    s->data_in_size = 0;
    s->data_in_index = 0;
    s->data_out_index = 0;
    s->autopoll = 0;

    /* reload the timers: T1 from a full latch, T2 with latch cleared */
    s->timers[0].latch = 0xffff;
    set_counter(s, &s->timers[0], 0xffff);

    s->timers[1].latch = 0;
    set_counter(s, &s->timers[1], 0xffff);
}
688 |
|
689 |
/* Realize: create the timer-1 and autopoll QEMU timers and seed the
 * RTC offset from the host date (CUDA's epoch is Jan 1 1904, hence
 * RTC_OFFSET).  Note: timers[1] never gets a QEMU timer — only timer 1
 * generates interrupts (cuda_timer_exist relies on this). */
static void cuda_realizefn(DeviceState *dev, Error **errp)
{
    CUDAState *s = CUDA(dev);
    struct tm tm;

    s->timers[0].timer = qemu_new_timer_ns(vm_clock, cuda_timer1, s);

    qemu_get_timedate(&tm, 0);
    s->tick_offset = (uint32_t)mktimegm(&tm) + RTC_OFFSET;

    s->adb_poll_timer = qemu_new_timer_ns(vm_clock, cuda_adb_poll, s);
}
701 |
|
702 |
/* Instance init: map the 8 KB MMIO window, export the IRQ line, number
 * the timers (index selects the reload behavior in get_counter), and
 * create the ADB bus in the file-scope adb_bus global — this assumes a
 * single CUDA instance per machine. */
static void cuda_initfn(Object *obj)
{
    SysBusDevice *d = SYS_BUS_DEVICE(obj);
    CUDAState *s = CUDA(obj);
    int i;

    memory_region_init_io(&s->mem, &cuda_ops, s, "cuda", 0x2000);
    sysbus_init_mmio(d, &s->mem);
    sysbus_init_irq(d, &s->irq);

    for (i = 0; i < ARRAY_SIZE(s->timers); i++) {
        s->timers[i].index = i;
    }

    qbus_create_inplace((BusState *)&adb_bus, TYPE_ADB_BUS, DEVICE(obj),
                        "adb.0");
}
719 |
|
720 |
/* QOM class init: wire up realize, reset and migration handlers. */
static void cuda_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->vmsd = &vmstate_cuda;
    dc->reset = cuda_reset;
    dc->realize = cuda_realizefn;
}
728 |
|
729 |
/* QOM type registration record for the CUDA sysbus device. */
static const TypeInfo cuda_type_info = {
    .name = TYPE_CUDA,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(CUDAState),
    .instance_init = cuda_initfn,
    .class_init = cuda_class_init,
};
736 |
|
737 |
/* Module hook: register the CUDA type with the QOM type system. */
static void cuda_register_types(void)
{
    type_register_static(&cuda_type_info);
}

type_init(cuda_register_types)