Revision 5fafdf24: hw/usb-uhci.c

--- a/hw/usb-uhci.c
+++ b/hw/usb-uhci.c
@@ -1,8 +1,8 @@
 /*
  * USB UHCI controller emulation
- * 
+ *
  * Copyright (c) 2005 Fabrice Bellard
- * 
+ *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
  * in the Software without restriction, including without limitation the rights
@@ -84,7 +84,7 @@
     /* For simplicity of implementation we only allow a single pending USB
        request. This means all usb traffic on this controller is effectively
        suspended until that transfer completes. When the transfer completes
-       the next transfer from that queue will be processed. However 
+       the next transfer from that queue will be processed. However
        other queues will not be processed until the next frame. The solution
        is to allow multiple pending requests. */
     uint32_t async_qh;
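
Note on the hunk above: async_qh records the one queue head whose transfer is currently in flight, and the comment explains that everything else waits for it. The standalone sketch below (plain C with made-up demo_* names, not code from this file) shows the gating pattern that comment describes.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: a non-zero async_qh records the queue head whose
   transfer is in flight; while it is set, no other queue gets serviced. */
struct demo_hc {
    uint32_t async_qh;
};

static int demo_start_transfer(struct demo_hc *hc, uint32_t qh_addr)
{
    if (hc->async_qh != 0)
        return -1;              /* one request already pending: defer */
    hc->async_qh = qh_addr;
    return 0;
}

static void demo_complete_transfer(struct demo_hc *hc)
{
    hc->async_qh = 0;           /* completion lets the next queue proceed */
}

int main(void)
{
    struct demo_hc hc = { 0 };
    printf("%d\n", demo_start_transfer(&hc, 0x1230));   /* 0: accepted */
    printf("%d\n", demo_start_transfer(&hc, 0x4560));   /* -1: deferred */
    demo_complete_transfer(&hc);
    printf("%d\n", demo_start_transfer(&hc, 0x4560));   /* 0: accepted now */
    return 0;
}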
@@ -149,7 +149,7 @@
 static void uhci_ioport_writeb(void *opaque, uint32_t addr, uint32_t val)
 {
     UHCIState *s = opaque;
-    
+
     addr &= 0x1f;
     switch(addr) {
     case 0x0c:
@@ -178,7 +178,7 @@
 static void uhci_ioport_writew(void *opaque, uint32_t addr, uint32_t val)
 {
     UHCIState *s = opaque;
-    
+
     addr &= 0x1f;
 #ifdef DEBUG
     printf("uhci writew port=0x%04x val=0x%04x\n", addr, val);
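
Note on the hunk above: the handler masks the guest I/O address with 0x1f because the UHCI register block registered later is 0x20 bytes long. A standalone sketch of that decode, with a hypothetical BAR base of 0xc020 and the standard UHCI register offsets assumed:

#include <stdio.h>

/* The UHCI register block is 0x20 bytes long, so only the low five bits of
   the I/O address select a register; this mirrors 'addr &= 0x1f' above. */
static unsigned demo_reg_offset(unsigned io_addr)
{
    return io_addr & 0x1f;
}

int main(void)
{
    unsigned base = 0xc020;   /* hypothetical BAR base, 0x20-aligned as PCI requires */
    printf("USBCMD  at 0x%04x -> offset 0x%02x\n", base + 0x00, demo_reg_offset(base + 0x00));
    printf("FRNUM   at 0x%04x -> offset 0x%02x\n", base + 0x06, demo_reg_offset(base + 0x06));
    printf("PORTSC1 at 0x%04x -> offset 0x%02x\n", base + 0x10, demo_reg_offset(base + 0x10));
    return 0;
}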
@@ -243,7 +243,7 @@
             dev = port->port.dev;
             if (dev) {
                 /* port reset */
-                if ( (val & UHCI_PORT_RESET) && 
+                if ( (val & UHCI_PORT_RESET) &&
                      !(port->ctrl & UHCI_PORT_RESET) ) {
                     usb_send_msg(dev, USB_MSG_RESET);
                 }
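
Note on the hunk above: the reset message is sent only when the written value asserts UHCI_PORT_RESET while the stored port control does not, i.e. on the 0 -> 1 edge. A minimal sketch of that edge test, assuming the standard PORTSC layout where the reset bit is bit 9:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PORT_RESET (1 << 9)   /* PORTSC port-reset bit (assumed standard layout) */

/* Reset is forwarded to the device only on the 0 -> 1 edge: the value being
   written has the bit set while the previously stored control word does not. */
static int demo_reset_edge(uint16_t old_ctrl, uint16_t written)
{
    return (written & DEMO_PORT_RESET) && !(old_ctrl & DEMO_PORT_RESET);
}

int main(void)
{
    printf("%d\n", demo_reset_edge(0x0000, 0x0200));  /* 1: reset just asserted */
    printf("%d\n", demo_reset_edge(0x0200, 0x0200));  /* 0: already in reset */
    printf("%d\n", demo_reset_edge(0x0200, 0x0000));  /* 0: reset being released */
    return 0;
}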
@@ -280,7 +280,7 @@
             UHCIPort *port;
             int n;
             n = (addr >> 1) & 7;
-            if (n >= NB_PORTS) 
+            if (n >= NB_PORTS)
                 goto read_default;
             port = &s->ports[n];
             val = port->ctrl;
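
Note on the hunk above: PORTSC registers are 16 bits wide and start at offset 0x10, so (addr >> 1) & 7 recovers the port index, and anything past the last port falls through to the default read. A standalone sketch, assuming a two-port controller:

#include <stdio.h>

#define DEMO_NB_PORTS 2   /* assumed port count for the sketch */

/* PORTSC registers are 16 bits wide and start at offset 0x10, so the port
   index falls out of the register offset exactly as in the hunk above. */
static int demo_port_index(unsigned reg_offset)
{
    int n = (reg_offset >> 1) & 7;
    return (n < DEMO_NB_PORTS) ? n : -1;   /* -1 stands in for the read_default path */
}

int main(void)
{
    printf("offset 0x10 -> port %d\n", demo_port_index(0x10));  /* 0 */
    printf("offset 0x12 -> port %d\n", demo_port_index(0x12));  /* 1 */
    printf("offset 0x14 -> port %d\n", demo_port_index(0x14));  /* -1: out of range */
    return 0;
}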
@@ -458,7 +458,7 @@
     if (td->ctrl & TD_CTRL_IOC) {
         *int_mask |= 0x01;
     }
-    
+
     if (!(td->ctrl & TD_CTRL_ACTIVE))
         return 1;
 
@@ -530,7 +530,7 @@
        here. The docs are somewhat unclear, but win2k relies on this
        behavior. */
     td->ctrl &= ~(TD_CTRL_ACTIVE | TD_CTRL_NAK);
-    if (pid == USB_TOKEN_IN && 
+    if (pid == USB_TOKEN_IN &&
         (td->ctrl & TD_CTRL_SPD) &&
         len < max_len) {
         *int_mask |= 0x02;
@@ -555,7 +555,7 @@
                 uhci_update_irq(s);
             }
         }
-        td->ctrl = (td->ctrl & ~(3 << TD_CTRL_ERROR_SHIFT)) | 
+        td->ctrl = (td->ctrl & ~(3 << TD_CTRL_ERROR_SHIFT)) |
             (err << TD_CTRL_ERROR_SHIFT);
         return 1;
     case USB_RET_NAK:
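
Note on the hunk above: the td->ctrl assignment is a read-modify-write of the two-bit error-counter field inside the TD control/status word. A standalone sketch of the same pattern, assuming the counter sits at bit 27 as in the UHCI TD layout:

#include <stdint.h>
#include <stdio.h>

#define DEMO_TD_CTRL_ERROR_SHIFT 27   /* assumed position of the 2-bit error counter */

/* Same read-modify-write as the hunk above: clear the two-bit field, then
   merge the updated error count back in. */
static uint32_t demo_set_error_count(uint32_t ctrl, uint32_t err)
{
    return (ctrl & ~(3u << DEMO_TD_CTRL_ERROR_SHIFT)) |
           ((err & 3u) << DEMO_TD_CTRL_ERROR_SHIFT);
}

int main(void)
{
    uint32_t ctrl = 3u << DEMO_TD_CTRL_ERROR_SHIFT;          /* counter starts at 3 */
    uint32_t err = (ctrl >> DEMO_TD_CTRL_ERROR_SHIFT) & 3;
    err--;                                                   /* one failed attempt */
    ctrl = demo_set_error_count(ctrl, err);
    printf("error count is now %u\n",
           (unsigned)((ctrl >> DEMO_TD_CTRL_ERROR_SHIFT) & 3));  /* prints 2 */
    return 0;
}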
@@ -597,7 +597,7 @@
     le32_to_cpus(&qh.el_link);
     /* Re-process the queue containing the async packet. */
     while (1) {
-        cpu_physical_memory_read(qh.el_link & ~0xf, 
+        cpu_physical_memory_read(qh.el_link & ~0xf,
                                  (uint8_t *)&td, sizeof(td));
         le32_to_cpus(&td.link);
         le32_to_cpus(&td.ctrl);
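
Note on the hunk above: the TD words just read live in guest memory in little-endian order, so each word is fixed up in place before use. The conversion below is a portable stand-in written for illustration (demo_le32_to_cpu_inplace is a made-up name, not the QEMU helper):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reinterpret the raw little-endian bytes held in *v as a host-order value,
   which is what an in-place fixup has to do on a big-endian host. */
static void demo_le32_to_cpu_inplace(uint32_t *v)
{
    uint8_t b[4];
    memcpy(b, v, sizeof(b));                      /* raw guest bytes, LE order */
    *v = (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
         ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
    uint8_t guest_bytes[4] = { 0x04, 0x34, 0x12, 0x00 };  /* LE encoding of 0x00123404 */
    uint32_t link;
    memcpy(&link, guest_bytes, sizeof(link));
    demo_le32_to_cpu_inplace(&link);
    printf("link = 0x%08x\n", (unsigned)link);             /* same on any host */
    return 0;
}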
@@ -608,8 +608,8 @@
         /* update the status bits of the TD */
         if (old_td_ctrl != td.ctrl) {
             val = cpu_to_le32(td.ctrl);
-            cpu_physical_memory_write((qh.el_link & ~0xf) + 4, 
-                                      (const uint8_t *)&val, 
+            cpu_physical_memory_write((qh.el_link & ~0xf) + 4,
+                                      (const uint8_t *)&val,
                                       sizeof(val));
         }
         if (ret < 0)
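
Note on the hunk above: only the control/status word is written back to the guest, which is why the target address is the TD base (el_link with the flag nibble masked off) plus 4. A sketch of the assumed four-word TD layout that makes the offset explicit:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed in-memory layout of a UHCI transfer descriptor: four 32-bit
   words.  Writing back only the control/status word is what makes the
   guest address above 'TD base + 4'. */
struct demo_uhci_td {
    uint32_t link;     /* +0  link pointer */
    uint32_t ctrl;     /* +4  control and status */
    uint32_t token;    /* +8  token */
    uint32_t buffer;   /* +12 data buffer pointer */
};

int main(void)
{
    printf("ctrl word lives at offset %zu\n", offsetof(struct demo_uhci_td, ctrl));  /* 4 */
    return 0;
}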
@@ -621,8 +621,8 @@
         /* update qh element link */
         qh.el_link = td.link;
         val = cpu_to_le32(qh.el_link);
-        cpu_physical_memory_write((link & ~0xf) + 4, 
-                                  (const uint8_t *)&val, 
+        cpu_physical_memory_write((link & ~0xf) + 4,
+                                  (const uint8_t *)&val,
                                   sizeof(val));
         if (!(qh.el_link & 4))
             break;
@@ -690,7 +690,7 @@
             /* TD */
             if (--cnt == 0)
                 break;
-            cpu_physical_memory_read(qh.el_link & ~0xf, 
+            cpu_physical_memory_read(qh.el_link & ~0xf,
                                      (uint8_t *)&td, sizeof(td));
             le32_to_cpus(&td.link);
             le32_to_cpus(&td.ctrl);
@@ -701,8 +701,8 @@
             /* update the status bits of the TD */
             if (old_td_ctrl != td.ctrl) {
                 val = cpu_to_le32(td.ctrl);
-                cpu_physical_memory_write((qh.el_link & ~0xf) + 4, 
-                                          (const uint8_t *)&val, 
+                cpu_physical_memory_write((qh.el_link & ~0xf) + 4,
+                                          (const uint8_t *)&val,
                                           sizeof(val));
             }
             if (ret < 0)
@@ -713,8 +713,8 @@
             /* update qh element link */
             qh.el_link = td.link;
             val = cpu_to_le32(qh.el_link);
-            cpu_physical_memory_write((link & ~0xf) + 4, 
-                                      (const uint8_t *)&val, 
+            cpu_physical_memory_write((link & ~0xf) + 4,
+                                      (const uint8_t *)&val,
                                       sizeof(val));
             if (qh.el_link & 4) {
                 /* depth first */
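
Note on the hunk above: the low nibble of a link pointer holds flag bits, which is why addresses are always masked with ~0xf, and the & 4 test picks out the depth-first bit. A standalone decode of those flags, assuming the standard UHCI link-pointer layout (bit 0 terminate, bit 1 QH select, bit 2 depth first):

#include <stdint.h>
#include <stdio.h>

#define DEMO_LINK_TERMINATE (1u << 0)   /* T: end of list */
#define DEMO_LINK_QUEUE     (1u << 1)   /* Q: target is a QH, not a TD */
#define DEMO_LINK_DEPTH     (1u << 2)   /* Vf: process depth first */

/* Split a link pointer into its target address and its flag bits. */
static void demo_decode_link(uint32_t link)
{
    printf("addr=0x%08x terminate=%u qh=%u depth_first=%u\n",
           (unsigned)(link & ~0xfu),
           (unsigned)!!(link & DEMO_LINK_TERMINATE),
           (unsigned)!!(link & DEMO_LINK_QUEUE),
           (unsigned)!!(link & DEMO_LINK_DEPTH));
}

int main(void)
{
    demo_decode_link(0x00123404);   /* TD link, depth first */
    demo_decode_link(0x00456002);   /* QH link */
    demo_decode_link(0x00000001);   /* terminate */
    return 0;
}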
@@ -740,8 +740,8 @@
             /* update the status bits of the TD */
             if (old_td_ctrl != td.ctrl) {
                 val = cpu_to_le32(td.ctrl);
-                cpu_physical_memory_write((link & ~0xf) + 4, 
-                                          (const uint8_t *)&val, 
+                cpu_physical_memory_write((link & ~0xf) + 4,
+                                          (const uint8_t *)&val,
                                           sizeof(val));
             }
             if (ret < 0)
@@ -768,12 +768,12 @@
         s->async_qh = 0;
     }
     /* prepare the timer for the next frame */
-    expire_time = qemu_get_clock(vm_clock) + 
+    expire_time = qemu_get_clock(vm_clock) +
         (ticks_per_sec / FRAME_TIMER_FREQ);
     qemu_mod_timer(s->frame_timer, expire_time);
 }
 
-static void uhci_map(PCIDevice *pci_dev, int region_num, 
+static void uhci_map(PCIDevice *pci_dev, int region_num,
                     uint32_t addr, uint32_t size, int type)
 {
     UHCIState *s = (UHCIState *)pci_dev;
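
Note on the hunk above: UHCI delivers one frame per millisecond, so the timer is re-armed for now plus ticks_per_sec / FRAME_TIMER_FREQ. The sketch below recomputes that interval with stand-in constants (a nanosecond clock and a 1 kHz frame rate are assumptions for the example, not values taken from this file):

#include <stdint.h>
#include <stdio.h>

#define DEMO_TICKS_PER_SEC    1000000000LL   /* assumed nanosecond-resolution clock */
#define DEMO_FRAME_TIMER_FREQ 1000           /* 1000 frames per second */

int main(void)
{
    int64_t now = 5000000000LL;    /* pretend current clock reading */
    int64_t expire_time = now + DEMO_TICKS_PER_SEC / DEMO_FRAME_TIMER_FREQ;
    printf("re-arm in %lld ticks, at %lld\n",
           (long long)(expire_time - now), (long long)expire_time);
    return 0;
}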
@@ -807,7 +807,7 @@
     pci_conf[0x0e] = 0x00; // header_type
     pci_conf[0x3d] = 4; // interrupt pin 3
     pci_conf[0x60] = 0x10; // release number
-    
+
     for(i = 0; i < NB_PORTS; i++) {
         qemu_register_usb_port(&s->ports[i].port, s, i, uhci_attach);
     }
@@ -817,7 +817,7 @@
 
     /* Use region 4 for consistency with real hardware. BSD guests seem
        to rely on this. */
-    pci_register_io_region(&s->dev, 4, 0x20, 
+    pci_register_io_region(&s->dev, 4, 0x20,
                            PCI_ADDRESS_SPACE_IO, uhci_map);
 }
 
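Note on the hunk above: region 4 is registered as a 0x20-byte I/O BAR, which is what lets the port handlers mask addresses with 0x1f. The sketch below shows how a guest would discover that size through the usual PCI BAR probe; the readback value is illustrative, not taken from this file.

#include <stdint.h>
#include <stdio.h>

/* Generic PCI behaviour: an I/O BAR hardwires its low size bits to zero
   (plus bit 0 set to flag I/O space), so writing all-ones and reading the
   BAR back reveals the region size.  0xffffffe1 is what a 32-byte I/O BAR
   would return for that probe. */
int main(void)
{
    uint32_t readback = 0xffffffe1;              /* assumed readback after all-ones write */
    uint32_t size = ~(readback & ~0x3u) + 1;     /* mask flag bits, then negate */
    printf("decoded I/O region size: 0x%x bytes\n", (unsigned)size);  /* 0x20 */
    return 0;
}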