root / hw / misc / macio / cuda.c @ a8aec295
History | View | Annotate | Download (20.3 kB)
1 |
/*
|
---|---|
2 |
* QEMU PowerMac CUDA device support
|
3 |
*
|
4 |
* Copyright (c) 2004-2007 Fabrice Bellard
|
5 |
* Copyright (c) 2007 Jocelyn Mayer
|
6 |
*
|
7 |
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
8 |
* of this software and associated documentation files (the "Software"), to deal
|
9 |
* in the Software without restriction, including without limitation the rights
|
10 |
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
11 |
* copies of the Software, and to permit persons to whom the Software is
|
12 |
* furnished to do so, subject to the following conditions:
|
13 |
*
|
14 |
* The above copyright notice and this permission notice shall be included in
|
15 |
* all copies or substantial portions of the Software.
|
16 |
*
|
17 |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
18 |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
19 |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
20 |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
21 |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
22 |
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
23 |
* THE SOFTWARE.
|
24 |
*/
|
#include <assert.h>

#include "hw/hw.h"
#include "hw/ppc/mac.h"
#include "hw/input/adb.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
|
31 |
/* XXX: implement all timer modes */
|
32 |
|
33 |
/* debug CUDA */
|
34 |
//#define DEBUG_CUDA
|
35 |
|
36 |
/* debug CUDA packets */
|
37 |
//#define DEBUG_CUDA_PACKET
|
38 |
|
39 |
#ifdef DEBUG_CUDA
|
40 |
#define CUDA_DPRINTF(fmt, ...) \
|
41 |
do { printf("CUDA: " fmt , ## __VA_ARGS__); } while (0) |
42 |
#else
|
43 |
#define CUDA_DPRINTF(fmt, ...)
|
44 |
#endif
|
45 |
|
46 |
/* Bits in B data register: all active low */
|
47 |
#define TREQ 0x08 /* Transfer request (input) */ |
48 |
#define TACK 0x10 /* Transfer acknowledge (output) */ |
49 |
#define TIP 0x20 /* Transfer in progress (output) */ |
50 |
|
51 |
/* Bits in ACR */
|
52 |
#define SR_CTRL 0x1c /* Shift register control bits */ |
53 |
#define SR_EXT 0x0c /* Shift on external clock */ |
54 |
#define SR_OUT 0x10 /* Shift out if 1 */ |
55 |
|
56 |
/* Bits in IFR and IER */
|
57 |
#define IER_SET 0x80 /* set bits in IER */ |
58 |
#define IER_CLR 0 /* clear bits in IER */ |
59 |
#define SR_INT 0x04 /* Shift register full/empty */ |
60 |
#define T1_INT 0x40 /* Timer 1 interrupt */ |
61 |
#define T2_INT 0x20 /* Timer 2 interrupt */ |
62 |
|
63 |
/* Bits in ACR */
|
64 |
#define T1MODE 0xc0 /* Timer 1 mode */ |
65 |
#define T1MODE_CONT 0x40 /* continuous interrupts */ |
66 |
|
67 |
/* commands (1st byte) */
|
68 |
#define ADB_PACKET 0 |
69 |
#define CUDA_PACKET 1 |
70 |
#define ERROR_PACKET 2 |
71 |
#define TIMER_PACKET 3 |
72 |
#define POWER_PACKET 4 |
73 |
#define MACIIC_PACKET 5 |
74 |
#define PMU_PACKET 6 |
75 |
|
76 |
|
77 |
/* CUDA commands (2nd byte) */
|
78 |
#define CUDA_WARM_START 0x0 |
79 |
#define CUDA_AUTOPOLL 0x1 |
80 |
#define CUDA_GET_6805_ADDR 0x2 |
81 |
#define CUDA_GET_TIME 0x3 |
82 |
#define CUDA_GET_PRAM 0x7 |
83 |
#define CUDA_SET_6805_ADDR 0x8 |
84 |
#define CUDA_SET_TIME 0x9 |
85 |
#define CUDA_POWERDOWN 0xa |
86 |
#define CUDA_POWERUP_TIME 0xb |
87 |
#define CUDA_SET_PRAM 0xc |
88 |
#define CUDA_MS_RESET 0xd |
89 |
#define CUDA_SEND_DFAC 0xe |
90 |
#define CUDA_BATTERY_SWAP_SENSE 0x10 |
91 |
#define CUDA_RESET_SYSTEM 0x11 |
92 |
#define CUDA_SET_IPL 0x12 |
93 |
#define CUDA_FILE_SERVER_FLAG 0x13 |
94 |
#define CUDA_SET_AUTO_RATE 0x14 |
95 |
#define CUDA_GET_AUTO_RATE 0x16 |
96 |
#define CUDA_SET_DEVICE_LIST 0x19 |
97 |
#define CUDA_GET_DEVICE_LIST 0x1a |
98 |
#define CUDA_SET_ONE_SECOND_MODE 0x1b |
99 |
#define CUDA_SET_POWER_MESSAGES 0x21 |
100 |
#define CUDA_GET_SET_IIC 0x22 |
101 |
#define CUDA_WAKEUP 0x23 |
102 |
#define CUDA_TIMER_TICKLE 0x24 |
103 |
#define CUDA_COMBINED_FORMAT_IIC 0x25 |
104 |
|
105 |
#define CUDA_TIMER_FREQ (4700000 / 6) |
106 |
#define CUDA_ADB_POLL_FREQ 50 |
107 |
|
108 |
/* CUDA returns time_t's offset from Jan 1, 1904, not 1970 */
|
109 |
#define RTC_OFFSET 2082844800 |
110 |
|
111 |
static void cuda_update(CUDAState *s); |
112 |
static void cuda_receive_packet_from_host(CUDAState *s, |
113 |
const uint8_t *data, int len); |
114 |
static void cuda_timer_update(CUDAState *s, CUDATimer *ti, |
115 |
int64_t current_time); |
116 |
|
117 |
/* Reflect the current IFR/IER state on the CUDA interrupt line.
 * Only the shift register and timer 1 sources are wired to the IRQ. */
static void cuda_update_irq(CUDAState *s)
{
    int pending = s->ifr & s->ier & (SR_INT | T1_INT);

    if (pending) {
        qemu_irq_raise(s->irq);
    } else {
        qemu_irq_lower(s->irq);
    }
}
125 |
|
126 |
/*
 * Return the current 16-bit value of a VIA timer, derived from the
 * vm_clock time elapsed since the timer was last loaded.
 * Timer 1 (index 0) counts down from the programmed value to -1 and
 * then reloads from the latch, giving a period of latch + 2 ticks;
 * timer 2 simply free-runs downwards.
 */
static unsigned int get_counter(CUDATimer *s)
{
    int64_t elapsed;

    elapsed = muldiv64(qemu_get_clock_ns(vm_clock) - s->load_time,
                       CUDA_TIMER_FREQ, get_ticks_per_sec());

    if (s->index != 0) {
        /* timer 2: plain down counter, no reload */
        return (s->counter_value - elapsed) & 0xffff;
    }

    if (elapsed <= (s->counter_value + 1)) {
        /* still inside the first count-down from the programmed value */
        return (s->counter_value - elapsed) & 0xffff;
    }

    /* past the first wrap: position inside the repeating latch+2 period */
    elapsed = (elapsed - (s->counter_value + 1)) % (s->latch + 2);
    return (s->latch - elapsed) & 0xffff;
}
146 |
|
147 |
/* Load a timer with a new counter value and reschedule its interrupt
 * relative to the current vm_clock time. */
static void set_counter(CUDAState *s, CUDATimer *ti, unsigned int val)
{
    int64_t now = qemu_get_clock_ns(vm_clock);

    CUDA_DPRINTF("T%d.counter=%d\n", 1 + (ti->timer == NULL), val);
    ti->load_time = now;
    ti->counter_value = val;
    cuda_timer_update(s, ti, now);
}
154 |
|
155 |
/*
 * Compute the vm_clock deadline (in ns) of the next timer 1 interrupt.
 * All intermediate arithmetic is done in CUDA timer ticks relative to
 * load_time, then converted back to nanoseconds at the end.
 */
static int64_t get_next_irq_time(CUDATimer *s, int64_t current_time)
{
    int64_t d, next_time;
    unsigned int counter;

    /* current counter value */
    d = muldiv64(current_time - s->load_time,
                 CUDA_TIMER_FREQ, get_ticks_per_sec());
    /* the timer goes down from latch to -1 (period of latch + 2) */
    if (d <= (s->counter_value + 1)) {
        counter = (s->counter_value - d) & 0xffff;
    } else {
        counter = (d - (s->counter_value + 1)) % (s->latch + 2);
        counter = (s->latch - counter) & 0xffff;
    }

    /* Note: we consider the irq is raised on 0 */
    if (counter == 0xffff) {
        /* just wrapped past 0: a full reload (latch + 1 ticks) remains */
        next_time = d + s->latch + 1;
    } else if (counter == 0) {
        /* IRQ fires right now; the next one is a whole period away */
        next_time = d + s->latch + 2;
    } else {
        next_time = d + counter;
    }
    CUDA_DPRINTF("latch=%d counter=%" PRId64 " delta_next=%" PRId64 "\n",
                 s->latch, d, next_time - d);
    /* convert ticks back to a vm_clock timestamp */
    next_time = muldiv64(next_time, get_ticks_per_sec(), CUDA_TIMER_FREQ) +
        s->load_time;
    /* never schedule a deadline in the past */
    if (next_time <= current_time)
        next_time = current_time + 1;
    return next_time;
}
187 |
|
188 |
/* (Re)arm or cancel the QEMU timer backing a CUDA timer. Only timer 1
 * in continuous-interrupt mode (ACR T1MODE_CONT) keeps a pending
 * callback; in any other mode the callback is removed. */
static void cuda_timer_update(CUDAState *s, CUDATimer *ti,
                              int64_t current_time)
{
    if (!ti->timer) {
        return;
    }
    if ((s->acr & T1MODE) == T1MODE_CONT) {
        ti->next_irq_time = get_next_irq_time(ti, current_time);
        qemu_mod_timer(ti->timer, ti->next_irq_time);
    } else {
        qemu_del_timer(ti->timer);
    }
}
200 |
|
201 |
static void cuda_timer1(void *opaque) |
202 |
{ |
203 |
CUDAState *s = opaque; |
204 |
CUDATimer *ti = &s->timers[0];
|
205 |
|
206 |
cuda_timer_update(s, ti, ti->next_irq_time); |
207 |
s->ifr |= T1_INT; |
208 |
cuda_update_irq(s); |
209 |
} |
210 |
|
211 |
/*
 * MMIO byte read of a VIA register. Registers are spaced 0x200 bytes
 * apart in the window, hence the addr >> 9 decode. Reads of the timer
 * counters and shift register have the side effect of acknowledging
 * the corresponding interrupt, as on real 6522 hardware.
 */
static uint32_t cuda_readb(void *opaque, hwaddr addr)
{
    CUDAState *s = opaque;
    uint32_t val;

    addr = (addr >> 9) & 0xf;
    switch(addr) {
    case 0:                     /* port B data */
        val = s->b;
        break;
    case 1:                     /* port A data */
        val = s->a;
        break;
    case 2:                     /* port B direction */
        val = s->dirb;
        break;
    case 3:                     /* port A direction */
        val = s->dira;
        break;
    case 4:                     /* T1 counter low: acks the T1 interrupt */
        val = get_counter(&s->timers[0]) & 0xff;
        s->ifr &= ~T1_INT;
        cuda_update_irq(s);
        break;
    case 5:                     /* T1 counter high */
        val = get_counter(&s->timers[0]) >> 8;
        cuda_update_irq(s);
        break;
    case 6:                     /* T1 latch low */
        val = s->timers[0].latch & 0xff;
        break;
    case 7:                     /* T1 latch high */
        /* XXX: check this */
        val = (s->timers[0].latch >> 8) & 0xff;
        break;
    case 8:                     /* T2 counter low: acks the T2 interrupt */
        val = get_counter(&s->timers[1]) & 0xff;
        s->ifr &= ~T2_INT;
        break;
    case 9:                     /* T2 counter high */
        val = get_counter(&s->timers[1]) >> 8;
        break;
    case 10:                    /* shift register: acks the SR interrupt */
        val = s->sr;
        s->ifr &= ~SR_INT;
        cuda_update_irq(s);
        break;
    case 11:                    /* auxiliary control register */
        val = s->acr;
        break;
    case 12:                    /* peripheral control register */
        val = s->pcr;
        break;
    case 13:                    /* interrupt flag register */
        val = s->ifr;
        /* bit 7 mirrors "any enabled interrupt pending" */
        if (s->ifr & s->ier)
            val |= 0x80;
        break;
    case 14:                    /* interrupt enable register: bit 7 reads as 1 */
        val = s->ier | 0x80;
        break;
    default:
    case 15:                    /* port A with no handshake */
        val = s->anh;
        break;
    }
    /* avoid log spam from the guest polling an empty IFR */
    if (addr != 13 || val != 0) {
        CUDA_DPRINTF("read: reg=0x%x val=%02x\n", (int)addr, val);
    }

    return val;
}
283 |
|
284 |
/*
 * MMIO byte write of a VIA register (registers spaced 0x200 bytes
 * apart). Writes to port B and ACR drive the CUDA transfer state
 * machine via cuda_update(); timer register writes reload latches or
 * counters and acknowledge the matching interrupt.
 */
static void cuda_writeb(void *opaque, hwaddr addr, uint32_t val)
{
    CUDAState *s = opaque;

    addr = (addr >> 9) & 0xf;
    CUDA_DPRINTF("write: reg=0x%x val=%02x\n", (int)addr, val);

    switch(addr) {
    case 0:                     /* port B: TIP/TACK handshake lines */
        s->b = val;
        cuda_update(s);
        break;
    case 1:                     /* port A */
        s->a = val;
        break;
    case 2:                     /* port B direction */
        s->dirb = val;
        break;
    case 3:                     /* port A direction */
        s->dira = val;
        break;
    case 4:                     /* T1 latch low */
        s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
        cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
        break;
    case 5:                     /* T1 counter high: acks T1, reloads from latch */
        s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
        s->ifr &= ~T1_INT;
        set_counter(s, &s->timers[0], s->timers[0].latch);
        break;
    case 6:                     /* T1 latch low (alias of reg 4) */
        s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
        cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
        break;
    case 7:                     /* T1 latch high: acks T1, no counter reload */
        s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
        s->ifr &= ~T1_INT;
        cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
        break;
    case 8:                     /* T2 latch low */
        s->timers[1].latch = val;
        set_counter(s, &s->timers[1], val);
        break;
    case 9:                     /* T2 counter high: starts timer 2 */
        set_counter(s, &s->timers[1], (val << 8) | s->timers[1].latch);
        break;
    case 10:                    /* shift register */
        s->sr = val;
        break;
    case 11:                    /* ACR: may change timer mode or shift direction */
        s->acr = val;
        cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
        cuda_update(s);
        break;
    case 12:                    /* peripheral control register */
        s->pcr = val;
        break;
    case 13:                    /* IFR: writing a 1 clears that flag */
        /* reset bits */
        s->ifr &= ~val;
        cuda_update_irq(s);
        break;
    case 14:                    /* IER: bit 7 selects set vs clear of bits 0-6 */
        if (val & IER_SET) {
            /* set bits */
            s->ier |= val & 0x7f;
        } else {
            /* reset bits */
            s->ier &= ~val;
        }
        cuda_update_irq(s);
        break;
    default:
    case 15:                    /* port A with no handshake */
        s->anh = val;
        break;
    }
}
362 |
|
363 |
/* NOTE: TIP and TREQ are negated */
/*
 * Core handshake state machine, run after every port B or ACR write.
 * One byte moves through the shift register per TACK/TIP transition:
 * host -> CUDA when SR_OUT is set in ACR, CUDA -> host otherwise.
 * When the host ends a transfer (raises TIP), any accumulated bytes
 * are handed to cuda_receive_packet_from_host().
 */
static void cuda_update(CUDAState *s)
{
    int packet_received, len;

    packet_received = 0;
    if (!(s->b & TIP)) {
        /* transfer requested from host */

        if (s->acr & SR_OUT) {
            /* data output */
            if ((s->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
                /* handshake edge: latch one byte from the shift register */
                if (s->data_out_index < sizeof(s->data_out)) {
                    CUDA_DPRINTF("send: %02x\n", s->sr);
                    s->data_out[s->data_out_index++] = s->sr;
                    s->ifr |= SR_INT;
                    cuda_update_irq(s);
                }
            }
        } else {
            if (s->data_in_index < s->data_in_size) {
                /* data input */
                if ((s->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
                    /* handshake edge: present the next byte to the guest */
                    s->sr = s->data_in[s->data_in_index++];
                    CUDA_DPRINTF("recv: %02x\n", s->sr);
                    /* indicate end of transfer */
                    if (s->data_in_index >= s->data_in_size) {
                        s->b = (s->b | TREQ);
                    }
                    s->ifr |= SR_INT;
                    cuda_update_irq(s);
                }
            }
        }
    } else {
        /* no transfer requested: handle sync case */
        if ((s->last_b & TIP) && (s->b & TACK) != (s->last_b & TACK)) {
            /* update TREQ state each time TACK change state */
            if (s->b & TACK)
                s->b = (s->b | TREQ);
            else
                s->b = (s->b & ~TREQ);
            s->ifr |= SR_INT;
            cuda_update_irq(s);
        } else {
            if (!(s->last_b & TIP)) {
                /* handle end of host to cuda transfer */
                packet_received = (s->data_out_index > 0);
                /* always an IRQ at the end of transfer */
                s->ifr |= SR_INT;
                cuda_update_irq(s);
            }
            /* signal if there is data to read */
            if (s->data_in_index < s->data_in_size) {
                s->b = (s->b & ~TREQ);
            }
        }
    }

    s->last_acr = s->acr;
    s->last_b = s->b;

    /* NOTE: cuda_receive_packet_from_host() can call cuda_update()
       recursively */
    if (packet_received) {
        len = s->data_out_index;
        s->data_out_index = 0;
        cuda_receive_packet_from_host(s, s->data_out, len);
    }
}
433 |
|
434 |
/*
 * Queue a response packet for the guest and raise SR_INT so it starts
 * shifting the data in. Overwrites any partially-read previous packet.
 *
 * Fix: the original memcpy() copied len bytes into the fixed-size
 * s->data_in buffer with no bounds check; an oversized packet would
 * silently overflow adjacent CUDAState fields. Current callers stay
 * within bounds, so an assert documents and enforces the contract.
 */
static void cuda_send_packet_to_host(CUDAState *s,
                                     const uint8_t *data, int len)
{
#ifdef DEBUG_CUDA_PACKET
    {
        int i;
        printf("cuda_send_packet_to_host:\n");
        for(i = 0; i < len; i++)
            printf(" %02x", data[i]);
        printf("\n");
    }
#endif
    /* guard the fixed-size input buffer against oversized packets */
    assert(len >= 0 && len <= sizeof(s->data_in));
    memcpy(s->data_in, data, len);
    s->data_in_size = len;
    s->data_in_index = 0;
    cuda_update(s);
    s->ifr |= SR_INT;
    cuda_update_irq(s);
}
453 |
|
454 |
static void cuda_adb_poll(void *opaque) |
455 |
{ |
456 |
CUDAState *s = opaque; |
457 |
uint8_t obuf[ADB_MAX_OUT_LEN + 2];
|
458 |
int olen;
|
459 |
|
460 |
olen = adb_poll(&s->adb_bus, obuf + 2);
|
461 |
if (olen > 0) { |
462 |
obuf[0] = ADB_PACKET;
|
463 |
obuf[1] = 0x40; /* polled data */ |
464 |
cuda_send_packet_to_host(s, obuf, olen + 2);
|
465 |
} |
466 |
qemu_mod_timer(s->adb_poll_timer, |
467 |
qemu_get_clock_ns(vm_clock) + |
468 |
(get_ticks_per_sec() / CUDA_ADB_POLL_FREQ)); |
469 |
} |
470 |
|
471 |
/*
 * Handle a CUDA_PACKET command (data[0] is the command byte that
 * followed the packet type) and send the matching reply packet.
 * Unknown commands are silently ignored.
 */
static void cuda_receive_packet(CUDAState *s,
                                const uint8_t *data, int len)
{
    uint8_t obuf[16];
    int autopoll;
    uint32_t ti;

    switch(data[0]) {
    case CUDA_AUTOPOLL:
        /* start/stop the periodic ADB autopoll timer */
        autopoll = (data[1] != 0);
        if (autopoll != s->autopoll) {
            s->autopoll = autopoll;
            if (autopoll) {
                qemu_mod_timer(s->adb_poll_timer,
                               qemu_get_clock_ns(vm_clock) +
                               (get_ticks_per_sec() / CUDA_ADB_POLL_FREQ));
            } else {
                qemu_del_timer(s->adb_poll_timer);
            }
        }
        obuf[0] = CUDA_PACKET;
        obuf[1] = data[1];
        cuda_send_packet_to_host(s, obuf, 2);
        break;
    case CUDA_SET_TIME:
        /* big-endian seconds since 1904; keep only the offset from vm_clock */
        ti = (((uint32_t)data[1]) << 24) + (((uint32_t)data[2]) << 16) + (((uint32_t)data[3]) << 8) + data[4];
        s->tick_offset = ti - (qemu_get_clock_ns(vm_clock) / get_ticks_per_sec());
        obuf[0] = CUDA_PACKET;
        obuf[1] = 0;
        obuf[2] = 0;
        cuda_send_packet_to_host(s, obuf, 3);
        break;
    case CUDA_GET_TIME:
        /* reply carries the time as 4 big-endian bytes after the header */
        ti = s->tick_offset + (qemu_get_clock_ns(vm_clock) / get_ticks_per_sec());
        obuf[0] = CUDA_PACKET;
        obuf[1] = 0;
        obuf[2] = 0;
        obuf[3] = ti >> 24;
        obuf[4] = ti >> 16;
        obuf[5] = ti >> 8;
        obuf[6] = ti;
        cuda_send_packet_to_host(s, obuf, 7);
        break;
    case CUDA_FILE_SERVER_FLAG:
    case CUDA_SET_DEVICE_LIST:
    case CUDA_SET_AUTO_RATE:
    case CUDA_SET_POWER_MESSAGES:
        /* accepted but not emulated: acknowledge only */
        obuf[0] = CUDA_PACKET;
        obuf[1] = 0;
        cuda_send_packet_to_host(s, obuf, 2);
        break;
    case CUDA_POWERDOWN:
        /* ack first, then request VM shutdown */
        obuf[0] = CUDA_PACKET;
        obuf[1] = 0;
        cuda_send_packet_to_host(s, obuf, 2);
        qemu_system_shutdown_request();
        break;
    case CUDA_RESET_SYSTEM:
        /* ack first, then request VM reset */
        obuf[0] = CUDA_PACKET;
        obuf[1] = 0;
        cuda_send_packet_to_host(s, obuf, 2);
        qemu_system_reset_request();
        break;
    default:
        break;
    }
}
538 |
|
539 |
/*
 * Dispatch a complete packet shifted in from the host. ADB packets go
 * to the ADB bus — a negative adb_request() return is mapped to an
 * error code in reply byte 1 — and CUDA packets are handled by
 * cuda_receive_packet(). Other packet types are ignored.
 */
static void cuda_receive_packet_from_host(CUDAState *s,
                                          const uint8_t *data, int len)
{
#ifdef DEBUG_CUDA_PACKET
    {
        int i;
        printf("cuda_receive_packet_from_host:\n");
        for(i = 0; i < len; i++)
            printf(" %02x", data[i]);
        printf("\n");
    }
#endif
    switch(data[0]) {
    case ADB_PACKET:
        {
            uint8_t obuf[ADB_MAX_OUT_LEN + 2];
            int olen;
            olen = adb_request(&s->adb_bus, obuf + 2, data + 1, len - 1);
            if (olen > 0) {
                obuf[0] = ADB_PACKET;
                obuf[1] = 0x00;
            } else {
                /* error */
                obuf[0] = ADB_PACKET;
                obuf[1] = -olen;
                olen = 0;
            }
            cuda_send_packet_to_host(s, obuf, olen + 2);
        }
        break;
    case CUDA_PACKET:
        cuda_receive_packet(s, data + 1, len - 1);
        break;
    }
}
574 |
|
575 |
/* 16-bit MMIO writes are not implemented: silently ignored. */
static void cuda_writew (void *opaque, hwaddr addr, uint32_t value)
{
}
578 |
|
579 |
/* 32-bit MMIO writes are not implemented: silently ignored. */
static void cuda_writel (void *opaque, hwaddr addr, uint32_t value)
{
}
582 |
|
583 |
/* 16-bit MMIO reads are not implemented: always return 0. */
static uint32_t cuda_readw (void *opaque, hwaddr addr)
{
    return 0;
}
587 |
|
588 |
/* 32-bit MMIO reads are not implemented: always return 0. */
static uint32_t cuda_readl (void *opaque, hwaddr addr)
{
    return 0;
}
592 |
|
593 |
/* Memory region ops: only byte accesses do real work; the 16/32-bit
 * handlers are stubs (writes ignored, reads return 0). */
static const MemoryRegionOps cuda_ops = {
    .old_mmio = {
        .write = {
            cuda_writeb,
            cuda_writew,
            cuda_writel,
        },
        .read = {
            cuda_readb,
            cuda_readw,
            cuda_readl,
        },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};
608 |
|
609 |
static bool cuda_timer_exist(void *opaque, int version_id) |
610 |
{ |
611 |
CUDATimer *s = opaque; |
612 |
|
613 |
return s->timer != NULL; |
614 |
} |
615 |
|
616 |
/* Migration state for one VIA timer; the QEMUTimer itself is only
 * saved when present (see cuda_timer_exist). */
static const VMStateDescription vmstate_cuda_timer = {
    .name = "cuda_timer",
    .version_id = 0,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(latch, CUDATimer),
        VMSTATE_UINT16(counter_value, CUDATimer),
        VMSTATE_INT64(load_time, CUDATimer),
        VMSTATE_INT64(next_irq_time, CUDATimer),
        VMSTATE_TIMER_TEST(timer, CUDATimer, cuda_timer_exist),
        VMSTATE_END_OF_LIST()
    }
};
630 |
|
631 |
/* Migration state for the whole CUDA device: VIA registers, the
 * in-flight transfer buffers/indices, autopoll flag, RTC offset and
 * both timers. */
static const VMStateDescription vmstate_cuda = {
    .name = "cuda",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(a, CUDAState),
        VMSTATE_UINT8(b, CUDAState),
        VMSTATE_UINT8(dira, CUDAState),
        VMSTATE_UINT8(dirb, CUDAState),
        VMSTATE_UINT8(sr, CUDAState),
        VMSTATE_UINT8(acr, CUDAState),
        VMSTATE_UINT8(pcr, CUDAState),
        VMSTATE_UINT8(ifr, CUDAState),
        VMSTATE_UINT8(ier, CUDAState),
        VMSTATE_UINT8(anh, CUDAState),
        VMSTATE_INT32(data_in_size, CUDAState),
        VMSTATE_INT32(data_in_index, CUDAState),
        VMSTATE_INT32(data_out_index, CUDAState),
        VMSTATE_UINT8(autopoll, CUDAState),
        VMSTATE_BUFFER(data_in, CUDAState),
        VMSTATE_BUFFER(data_out, CUDAState),
        VMSTATE_UINT32(tick_offset, CUDAState),
        VMSTATE_STRUCT_ARRAY(timers, CUDAState, 2, 1,
                             vmstate_cuda_timer, CUDATimer),
        VMSTATE_END_OF_LIST()
    }
};
659 |
|
660 |
/* Device reset: clear every VIA register and all transfer state, stop
 * autopolling, and put both timers in their power-on configuration. */
static void cuda_reset(DeviceState *dev)
{
    CUDAState *s = CUDA(dev);

    s->b = 0;
    s->a = 0;
    s->dirb = 0;
    s->dira = 0;
    s->sr = 0;
    s->acr = 0;
    s->pcr = 0;
    s->ifr = 0;
    s->ier = 0;
    // s->ier = T1_INT | SR_INT;
    s->anh = 0;
    s->data_in_size = 0;
    s->data_in_index = 0;
    s->data_out_index = 0;
    s->autopoll = 0;

    s->timers[0].latch = 0xffff;
    set_counter(s, &s->timers[0], 0xffff);

    s->timers[1].latch = 0;
    set_counter(s, &s->timers[1], 0xffff);
}
686 |
|
687 |
static void cuda_realizefn(DeviceState *dev, Error **errp) |
688 |
{ |
689 |
CUDAState *s = CUDA(dev); |
690 |
struct tm tm;
|
691 |
|
692 |
s->timers[0].timer = qemu_new_timer_ns(vm_clock, cuda_timer1, s);
|
693 |
|
694 |
qemu_get_timedate(&tm, 0);
|
695 |
s->tick_offset = (uint32_t)mktimegm(&tm) + RTC_OFFSET; |
696 |
|
697 |
s->adb_poll_timer = qemu_new_timer_ns(vm_clock, cuda_adb_poll, s); |
698 |
} |
699 |
|
700 |
/* Instance init: map the 8 KB VIA register window, export the IRQ,
 * number the timers (index selects the reload behaviour in
 * get_counter) and create the child ADB bus. */
static void cuda_initfn(Object *obj)
{
    SysBusDevice *d = SYS_BUS_DEVICE(obj);
    CUDAState *s = CUDA(obj);
    int i;

    memory_region_init_io(&s->mem, &cuda_ops, s, "cuda", 0x2000);
    sysbus_init_mmio(d, &s->mem);
    sysbus_init_irq(d, &s->irq);

    for (i = 0; i < ARRAY_SIZE(s->timers); i++) {
        s->timers[i].index = i;
    }

    qbus_create_inplace((BusState *)&s->adb_bus, TYPE_ADB_BUS, DEVICE(obj),
                        "adb.0");
}
717 |
|
718 |
/* Wire up the QOM device class callbacks and migration description. */
static void cuda_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->reset = cuda_reset;
    dc->realize = cuda_realizefn;
    dc->vmsd = &vmstate_cuda;
}
726 |
|
727 |
/* QOM type registration data for the CUDA sysbus device. */
static const TypeInfo cuda_type_info = {
    .name = TYPE_CUDA,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(CUDAState),
    .instance_init = cuda_initfn,
    .class_init = cuda_class_init,
};
734 |
|
735 |
/* Register the CUDA device type with the QOM type system. */
static void cuda_register_types(void)
{
    type_register_static(&cuda_type_info);
}
739 |
|
740 |
type_init(cuda_register_types) |