/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include <dirent.h>
#include <linux/vfio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include "config.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
#include "qemu/queue.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"

/* #define DEBUG_VFIO */
#ifdef DEBUG_VFIO
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "vfio: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* Extra debugging, trap acceleration paths for more logging */
#define VFIO_ALLOW_MMAP 1
#define VFIO_ALLOW_KVM_INTX 1

struct VFIODevice;

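/*
 * Device quirks overlay a small MemoryRegion on top of a BAR or VGA window
 * to intercept backdoors into PCI config space.  The data fields below
 * describe the address/data register pair (or fixed mirror range) that a
 * particular device exposes.
 */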
typedef struct VFIOQuirk {
    MemoryRegion mem;
    struct VFIODevice *vdev;
    QLIST_ENTRY(VFIOQuirk) next;
    struct {
        uint32_t base_offset:TARGET_PAGE_BITS;
        uint32_t address_offset:TARGET_PAGE_BITS;
        uint32_t address_size:3;
        uint32_t bar:3;

        uint32_t address_match;
        uint32_t address_mask;

        uint32_t address_val:TARGET_PAGE_BITS;
        uint32_t data_offset:TARGET_PAGE_BITS;
        uint32_t data_size:3;

        uint8_t flags;
        uint8_t read_flags;
        uint8_t write_flags;
    } data;
} VFIOQuirk;

typedef struct VFIOBAR {
    off_t fd_offset; /* offset of BAR within device fd */
    int fd; /* device fd, allows us to pass VFIOBAR as opaque data */
    MemoryRegion mem; /* slow, read/write access */
    MemoryRegion mmap_mem; /* direct mapped access */
    void *mmap;
    size_t size;
    uint32_t flags; /* VFIO region flags (rd/wr/mmap) */
    uint8_t nr; /* cache the BAR number for debug */
    bool ioport;
    bool mem64;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOBAR;

typedef struct VFIOVGARegion {
    MemoryRegion mem;
    off_t offset;
    int nr;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOVGARegion;

typedef struct VFIOVGA {
    off_t fd_offset;
    int fd;
    VFIOVGARegion region[QEMU_PCI_VGA_NUM_REGIONS];
} VFIOVGA;

typedef struct VFIOINTx {
    bool pending; /* interrupt pending */
    bool kvm_accel; /* set when QEMU bypass through KVM enabled */
    uint8_t pin; /* which pin to pull for qemu_set_irq */
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    EventNotifier unmask; /* eventfd for unmask on QEMU bypass */
    PCIINTxRoute route; /* routing info for QEMU bypass */
    uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */
    QEMUTimer *mmap_timer; /* enable mmaps after periods w/o interrupts */
} VFIOINTx;

typedef struct VFIOMSIVector {
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    struct VFIODevice *vdev; /* back pointer to device */
    int virq; /* KVM irqchip route for QEMU bypass */
    bool use;
} VFIOMSIVector;

enum {
    VFIO_INT_NONE = 0,
    VFIO_INT_INTx = 1,
    VFIO_INT_MSI  = 2,
    VFIO_INT_MSIX = 3,
};

struct VFIOGroup;

typedef struct VFIOContainer {
    int fd; /* /dev/vfio/vfio, empowered by the attached groups */
    struct {
        /* enable abstraction to support various iommu backends */
        union {
            MemoryListener listener; /* Used by type1 iommu */
        };
        void (*release)(struct VFIOContainer *);
    } iommu_data;
    QLIST_HEAD(, VFIOGroup) group_list;
    QLIST_ENTRY(VFIOContainer) next;
} VFIOContainer;

/* Cache of MSI-X setup plus extra mmap and memory region for split BAR map */
typedef struct VFIOMSIXInfo {
    uint8_t table_bar;
    uint8_t pba_bar;
    uint16_t entries;
    uint32_t table_offset;
    uint32_t pba_offset;
    MemoryRegion mmap_mem;
    void *mmap;
} VFIOMSIXInfo;

typedef struct VFIODevice {
    PCIDevice pdev;
    int fd;
    VFIOINTx intx;
    unsigned int config_size;
    uint8_t *emulated_config_bits; /* QEMU emulated bits, little-endian */
    off_t config_offset; /* Offset of config space region within device fd */
    unsigned int rom_size;
    off_t rom_offset; /* Offset of ROM region within device fd */
    int msi_cap_size;
    VFIOMSIVector *msi_vectors;
    VFIOMSIXInfo *msix;
    int nr_vectors; /* Number of MSI/MSIX vectors currently in use */
    int interrupt; /* Current interrupt type */
    VFIOBAR bars[PCI_NUM_REGIONS - 1]; /* No ROM */
    VFIOVGA vga; /* 0xa0000, 0x3b0, 0x3c0 */
    PCIHostDeviceAddress host;
    QLIST_ENTRY(VFIODevice) next;
    struct VFIOGroup *group;
    uint32_t features;
#define VFIO_FEATURE_ENABLE_VGA_BIT 0
#define VFIO_FEATURE_ENABLE_VGA (1 << VFIO_FEATURE_ENABLE_VGA_BIT)
    int32_t bootindex;
    uint8_t pm_cap;
    bool reset_works;
    bool has_vga;
} VFIODevice;

typedef struct VFIOGroup {
    int fd;
    int groupid;
    VFIOContainer *container;
    QLIST_HEAD(, VFIODevice) device_list;
    QLIST_ENTRY(VFIOGroup) next;
    QLIST_ENTRY(VFIOGroup) container_next;
} VFIOGroup;

#define MSIX_CAP_LENGTH 12

static QLIST_HEAD(, VFIOContainer)
    container_list = QLIST_HEAD_INITIALIZER(container_list);

static QLIST_HEAD(, VFIOGroup)
    group_list = QLIST_HEAD_INITIALIZER(group_list);

static void vfio_disable_interrupts(VFIODevice *vdev);
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len);
static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled);

/*
 * Common VFIO interrupt disable
 */
static void vfio_disable_irqindex(VFIODevice *vdev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

/*
 * INTx
 */
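/* Host-side INTx mask/unmask, again via VFIO_DEVICE_SET_IRQS; the DATA_NONE
 * payload means no eventfd is passed, only the action. */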
static void vfio_unmask_intx(VFIODevice *vdev)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

#ifdef CONFIG_KVM /* Unused outside of CONFIG_KVM code */
static void vfio_mask_intx(VFIODevice *vdev)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
#endif

/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (vdev->intx.pending) {
        qemu_mod_timer(vdev->intx.mmap_timer,
                       qemu_get_clock_ms(vm_clock) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}

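/* eventfd handler: a host INTx fired.  Assert the guest IRQ, trap BAR
 * accesses so we can spot the EOI, and arm the mmap re-enable timer. */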
static void vfio_intx_interrupt(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) Pin %c\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function,
            'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 1);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        qemu_mod_timer(vdev->intx.mmap_timer,
                       qemu_get_clock_ms(vm_clock) + vdev->intx.mmap_timeout);
    }
}

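/* De-assert the guest IRQ and unmask the host interrupt; no-op unless an
 * INTx is actually pending. */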
static void vfio_eoi(VFIODevice *vdev)
{
    if (!vdev->intx.pending) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) EOI\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
    vfio_unmask_intx(vdev);
}

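/* Hand the INTx eventfd to KVM as a resample irqfd so interrupts bypass
 * QEMU entirely: KVM injects and resamples, VFIO unmasks when the resample
 * eventfd fires. */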
static void vfio_enable_intx_kvm(VFIODevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    struct vfio_irq_set *irq_set;
    int ret, argsz;
    int32_t *pfd;

    if (!VFIO_ALLOW_KVM_INTX || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_intx(vdev);
    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_report("vfio: Error: event_notifier_init failed eoi");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to setup resample irqfd: %m");
        goto fail_irqfd;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = irqfd.resamplefd;

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
        goto fail_vfio;
    }

    /* Let'em rip */
    vfio_unmask_intx(vdev);

    vdev->intx.kvm_accel = true;

    DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel enabled\n",
            __func__, vdev->host.domain, vdev->host.bus,
            vdev->host.slot, vdev->host.function);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_intx(vdev);
#endif
}

static void vfio_disable_intx_kvm(VFIODevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_intx(vdev);
    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_intx(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel disabled\n",
            __func__, vdev->host.domain, vdev->host.bus,
            vdev->host.slot, vdev->host.function);
#endif
}

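/* INTx routing notifier: when the guest's pin-to-IRQ routing changes, tear
 * down the KVM bypass and re-establish it for the new route. */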
static void vfio_update_irq(PCIDevice *pdev)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) IRQ moved %d -> %d\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, vdev->intx.route.irq, route.irq);

    vfio_disable_intx_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_enable_intx_kvm(vdev);

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_eoi(vdev);
}

static int vfio_enable_intx(VFIODevice *vdev)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    int ret, argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() &&
        kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_report("vfio: Error: event_notifier_init failed");
        return ret;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx fd: %m");
        /* don't dereference pfd here, it points into the freed irq_set */
        qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt),
                            NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }

    vfio_enable_intx_kvm(vdev);

    vdev->interrupt = VFIO_INT_INTx;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    return 0;
}

static void vfio_disable_intx(VFIODevice *vdev)
{
    int fd;

    qemu_del_timer(vdev->intx.mmap_timer);
    vfio_disable_intx_kvm(vdev);
    vfio_disable_irqindex(vdev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}

/*
 * MSI/X
 */
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIODevice *vdev = vector->vdev;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    if (vdev->interrupt == VFIO_INT_MSIX) {
        msix_notify(&vdev->pdev, nr);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        msi_notify(&vdev->pdev, nr);
    } else {
        error_report("vfio: MSI interrupt received, but not enabled?");
    }
}

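/*
 * Program all vectors in one VFIO_DEVICE_SET_IRQS call.  The ioctl takes a
 * variable-length buffer, struct vfio_irq_set followed by one int32_t eventfd
 * per vector (-1 for allocated-but-unused vectors), e.g. for 3 vectors:
 *
 *   +--------------------------+----+----+----+
 *   | vfio_irq_set (count = 3) | fd | -1 | fd |
 *   +--------------------------+----+----+----+
 */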
static int vfio_enable_vectors(VFIODevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        if (!vdev->msi_vectors[i].use) {
            fds[i] = -1;
            continue;
        }

        fds[i] = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
    }

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}

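/* Guest unmasked an MSI-X vector: set up its eventfd, prefer a KVM irqfd
 * route and fall back to a userspace handler, growing the number of
 * host-enabled vectors on demand. */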
static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOMSIVector *vector;
    int ret;

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d used\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    vector = &vdev->msi_vectors[nr];
    vector->vdev = vdev;
    vector->use = true;

    msix_vector_use(pdev, nr);

    if (event_notifier_init(&vector->interrupt, 0)) {
        error_report("vfio: Error: event_notifier_init failed");
    }

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    vector->virq = msg ? kvm_irqchip_add_msi_route(kvm_state, *msg) : -1;
    if (vector->virq < 0 ||
        kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
                                       vector->virq) < 0) {
        if (vector->virq >= 0) {
            kvm_irqchip_release_virq(kvm_state, vector->virq);
            vector->virq = -1;
        }
        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            handler, NULL, vector);
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shut them all down and
     * incrementally increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        *pfd = event_notifier_get_fd(&vector->interrupt);

        ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
        g_free(irq_set);
        if (ret) {
            error_report("vfio: failed to modify vector, %d", ret);
        }
    }

    return 0;
}

static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}

static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d released\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    /*
     * XXX What's the right thing to do here?  This turns off the interrupt
     * completely, but do we really just want to switch the interrupt to
     * bouncing through userspace and let msix.c drop it?  Not sure.
     */
    msix_vector_unuse(pdev, nr);

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
    irq_set->start = nr;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = -1;

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    if (vector->virq < 0) {
        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            NULL, NULL, NULL);
    } else {
        kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt,
                                          vector->virq);
        kvm_irqchip_release_virq(kvm_state, vector->virq);
        vector->virq = -1;
    }

    event_notifier_cleanup(&vector->interrupt);
    vector->use = false;
}

static void vfio_enable_msix(VFIODevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_malloc0(vdev->msix->entries * sizeof(VFIOMSIVector));

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host.  When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked.  We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}

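/*
 * Enable plain MSI.  VFIO_DEVICE_SET_IRQS may return a positive count if the
 * host can only supply fewer vectors than requested, in which case we tear
 * everything down and retry with that smaller count.
 */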
static void vfio_enable_msi(VFIODevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector));

    for (i = 0; i < vdev->nr_vectors; i++) {
        MSIMessage msg;
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        vector->vdev = vdev;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        msg = msi_get_message(&vdev->pdev, i);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vector->virq = kvm_irqchip_add_msi_route(kvm_state, msg);
        if (vector->virq < 0 ||
            kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
                                           vector->virq) < 0) {
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                vfio_msi_interrupt, NULL, vector);
        }
    }

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt,
                                                  vector->virq);
                kvm_irqchip_release_virq(kvm_state, vector->virq);
                vector->virq = -1;
            } else {
                qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                    NULL, NULL, NULL);
            }
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        return;
    }

    vdev->interrupt = VFIO_INT_MSI;

    DPRINTF("%s(%04x:%02x:%02x.%x) Enabled %d MSI vectors\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, vdev->nr_vectors);
}

static void vfio_disable_msi_common(VFIODevice *vdev)
{
    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_enable_intx(vdev);
}

static void vfio_disable_msix(VFIODevice *vdev)
{
    msix_unset_vector_notifiers(&vdev->pdev);

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_disable_msi_common(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}

static void vfio_disable_msi(VFIODevice *vdev)
{
    int i;

    vfio_disable_irqindex(vdev, VFIO_PCI_MSI_IRQ_INDEX);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        if (!vector->use) {
            continue;
        }

        if (vector->virq >= 0) {
            kvm_irqchip_remove_irqfd_notifier(kvm_state,
                                              &vector->interrupt, vector->virq);
            kvm_irqchip_release_virq(kvm_state, vector->virq);
            vector->virq = -1;
        } else {
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
        }

        event_notifier_cleanup(&vector->interrupt);
    }

    vfio_disable_msi_common(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}

/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
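/* All trapped BAR accesses funnel through pread()/pwrite() at the BAR's
 * offset within the device fd, so the kernel can virtualize where needed. */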
static void vfio_bar_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOBAR *bar = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes\n", size);
        break;
    }

    if (pwrite(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, addr, data, size);
    }

#ifdef DEBUG_VFIO
    {
        VFIODevice *vdev = container_of(bar, VFIODevice, bars[bar->nr]);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                ", %d)\n", __func__, vdev->host.domain, vdev->host.bus,
                vdev->host.slot, vdev->host.function, bar->nr, addr,
                data, size);
    }
#endif

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));
}

static uint64_t vfio_bar_read(void *opaque,
                              hwaddr addr, unsigned size)
{
    VFIOBAR *bar = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

#ifdef DEBUG_VFIO
    {
        VFIODevice *vdev = container_of(bar, VFIODevice, bars[bar->nr]);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx
                ", %d) = 0x%"PRIx64"\n", __func__, vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function,
                bar->nr, addr, size, data);
    }
#endif

    /* Same as write above */
    vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));

    return data;
}

static const MemoryRegionOps vfio_bar_ops = {
    .read = vfio_bar_read,
    .write = vfio_bar_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_vga_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes\n", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    DPRINTF("%s(0x%"HWADDR_PRIx", 0x%"PRIx64", %d)\n",
            __func__, region->offset + addr, data, size);
}

static uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    DPRINTF("%s(0x%"HWADDR_PRIx", %d) = 0x%"PRIx64"\n",
            __func__, region->offset + addr, size, data);

    return data;
}

static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * Device specific quirks
 */

/* Is range1 fully contained within range2? */
static bool vfio_range_contained(uint64_t first1, uint64_t len1,
                                 uint64_t first2, uint64_t len2)
{
    return (first1 >= first2 && first1 + len1 <= first2 + len2);
}

static bool vfio_flags_enabled(uint8_t flags, uint8_t mask)
{
    return (mask && (flags & mask) == mask);
}

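/*
 * Generic address/data window quirk.  Writes to the address register arm or
 * disarm the quirk; while armed, accesses to the data register are redirected
 * to emulated PCI config space and everything else passes through to the BAR.
 */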
static uint64_t vfio_generic_window_quirk_read(void *opaque,
                                               hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    uint64_t data;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
        ranges_overlap(addr, size,
                       quirk->data.data_offset, quirk->data.data_size)) {
        hwaddr offset = addr - quirk->data.data_offset;

        if (!vfio_range_contained(addr, size, quirk->data.data_offset,
                                  quirk->data.data_size)) {
            hw_error("%s: window data read not fully contained: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        data = vfio_pci_read_config(&vdev->pdev,
                                    quirk->data.address_val + offset, size);

        DPRINTF("%s read(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", %d) = 0x%"
                PRIx64"\n", memory_region_name(&quirk->mem), vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function,
                quirk->data.bar, addr, size, data);
    } else {
        data = vfio_bar_read(&vdev->bars[quirk->data.bar],
                             addr + quirk->data.base_offset, size);
    }

    return data;
}

static void vfio_generic_window_quirk_write(void *opaque, hwaddr addr,
                                            uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;

    if (ranges_overlap(addr, size,
                       quirk->data.address_offset, quirk->data.address_size)) {

        if (addr != quirk->data.address_offset) {
            hw_error("%s: offset write into address window: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        if ((data & ~quirk->data.address_mask) == quirk->data.address_match) {
            quirk->data.flags |= quirk->data.write_flags |
                                 quirk->data.read_flags;
            quirk->data.address_val = data & quirk->data.address_mask;
        } else {
            quirk->data.flags &= ~(quirk->data.write_flags |
                                   quirk->data.read_flags);
        }
    }

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
        ranges_overlap(addr, size,
                       quirk->data.data_offset, quirk->data.data_size)) {
        hwaddr offset = addr - quirk->data.data_offset;

        if (!vfio_range_contained(addr, size, quirk->data.data_offset,
                                  quirk->data.data_size)) {
            hw_error("%s: window data write not fully contained: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        vfio_pci_write_config(&vdev->pdev,
                              quirk->data.address_val + offset, data, size);
        DPRINTF("%s write(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"
                PRIx64", %d)\n", memory_region_name(&quirk->mem),
                vdev->host.domain, vdev->host.bus, vdev->host.slot,
                vdev->host.function, quirk->data.bar, addr, data, size);
        return;
    }

    vfio_bar_write(&vdev->bars[quirk->data.bar],
                   addr + quirk->data.base_offset, data, size);
}

static const MemoryRegionOps vfio_generic_window_quirk = {
    .read = vfio_generic_window_quirk_read,
    .write = vfio_generic_window_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

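/*
 * Generic mirror quirk: a fixed, page-aligned range of the BAR mirrors config
 * space directly, with no address register; accesses within address_mask of
 * address_match are redirected, the rest pass through.
 */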
static uint64_t vfio_generic_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
    hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;
    uint64_t data;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
        ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
        if (!vfio_range_contained(addr, size, offset,
                                  quirk->data.address_mask + 1)) {
            hw_error("%s: read not fully contained: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        data = vfio_pci_read_config(&vdev->pdev, addr - offset, size);

        DPRINTF("%s read(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", %d) = 0x%"
                PRIx64"\n", memory_region_name(&quirk->mem), vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function,
                quirk->data.bar, addr + base, size, data);
    } else {
        data = vfio_bar_read(&vdev->bars[quirk->data.bar], addr + base, size);
    }

    return data;
}

static void vfio_generic_quirk_write(void *opaque, hwaddr addr,
                                     uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
    hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
        ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
        if (!vfio_range_contained(addr, size, offset,
                                  quirk->data.address_mask + 1)) {
            hw_error("%s: write not fully contained: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        vfio_pci_write_config(&vdev->pdev, addr - offset, data, size);

        DPRINTF("%s write(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"
                PRIx64", %d)\n", memory_region_name(&quirk->mem),
                vdev->host.domain, vdev->host.bus, vdev->host.slot,
                vdev->host.function, quirk->data.bar, addr + base, data, size);
    } else {
        vfio_bar_write(&vdev->bars[quirk->data.bar], addr + base, data, size);
    }
}

static const MemoryRegionOps vfio_generic_quirk = {
    .read = vfio_generic_quirk_read,
    .write = vfio_generic_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

#define PCI_VENDOR_ID_ATI 0x1002

/*
 * Radeon HD cards (HD5450 & HD7850) report the upper byte of the I/O port BAR
 * through VGA register 0x3c3.  On newer cards, the I/O port BAR is always
 * BAR4 (older cards like the X550 used BAR1, but we don't care to support
 * those).  Note that on bare metal, a read of 0x3c3 doesn't always return the
 * I/O port BAR address.  Originally this was coded to return the virtual BAR
 * address only if the physical register read returns the actual BAR address,
 * but users have reported greater success if we return the virtual address
 * unconditionally.
 */
static uint64_t vfio_ati_3c3_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    uint64_t data = vfio_pci_read_config(&vdev->pdev,
                                         PCI_BASE_ADDRESS_0 + (4 * 4) + 1,
                                         size);
    DPRINTF("%s(0x3c3, 1) = 0x%"PRIx64"\n", __func__, data);

    return data;
}

static const MemoryRegionOps vfio_ati_3c3_quirk = {
    .read = vfio_ati_3c3_quirk_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_vga_probe_ati_3c3_quirk(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    /*
     * As long as the BAR is >= 256 bytes it will be aligned such that the
     * lower byte is always zero.  Filter out anything else, if it exists.
     */
    if (!vdev->bars[4].ioport || vdev->bars[4].size < 256) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_ati_3c3_quirk, quirk,
                          "vfio-ati-3c3-quirk", 1);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                                3 /* offset 3 bytes from 0x3c0 */, &quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    DPRINTF("Enabled ATI/AMD quirk 0x3c3 BAR4 for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}

/*
 * Newer ATI/AMD devices, including HD5450 and HD7850, have a window to PCI
 * config space through MMIO BAR2 at offset 0x4000.  Nothing seems to access
 * the MMIO space directly, but a window to this space is provided through
 * I/O port BAR4.  Offset 0x0 is the address register and offset 0x4 is the
 * data register.  When the address is programmed to a range of 0x4000-0x4fff
 * PCI configuration space is available.  Experimentation seems to indicate
 * that only read-only access is provided, but we drop writes when the window
 * is enabled to config space nonetheless.
 */
static void vfio_probe_ati_bar4_window_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 4 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.address_size = 4;
    quirk->data.data_offset = 4;
    quirk->data.data_size = 4;
    quirk->data.address_match = 0x4000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;
    quirk->data.read_flags = quirk->data.write_flags = 1;

    memory_region_init_io(&quirk->mem, OBJECT(vdev),
                          &vfio_generic_window_quirk, quirk,
                          "vfio-ati-bar4-window-quirk", 8);
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        quirk->data.base_offset, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled ATI/AMD BAR4 window quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}

/*
 * Trap the BAR2 MMIO window to config space as well.
 */
static void vfio_probe_ati_bar2_4000_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    /* Only enable on newer devices where BAR2 is 64bit */
    if (!vdev->has_vga || nr != 2 || !vdev->bars[2].mem64 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
    quirk->data.address_match = 0x4000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, quirk,
                          "vfio-ati-bar2-4000-quirk",
                          TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        quirk->data.address_match & TARGET_PAGE_MASK,
                                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled ATI/AMD BAR2 0x4000 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}

/*
 * Older ATI/AMD cards like the X550 have a similar window to that above.
 * I/O port BAR1 provides a window to a mirror of PCI config space located
 * in BAR2 at offset 0xf00.  We don't care to support such older cards, but
 * note it for future reference.
 */

#define PCI_VENDOR_ID_NVIDIA 0x10de

/*
|
1429 |
* Nvidia has several different methods to get to config space, the
|
1430 |
* nouveu project has several of these documented here:
|
1431 |
* https://github.com/pathscale/envytools/tree/master/hwdocs
|
1432 |
*
|
1433 |
* The first quirk is actually not documented in envytools and is found
|
1434 |
* on 10de:01d1 (NVIDIA Corporation G72 [GeForce 7300 LE]). This is an
|
1435 |
* NV46 chipset. The backdoor uses the legacy VGA I/O ports to access
|
1436 |
* the mirror of PCI config space found at BAR0 offset 0x1800. The access
|
1437 |
* sequence first writes 0x338 to I/O port 0x3d4. The target offset is
|
1438 |
* then written to 0x3d0. Finally 0x538 is written for a read and 0x738
|
1439 |
* is written for a write to 0x3d4. The BAR0 offset is then accessible
|
1440 |
* through 0x3d0. This quirk doesn't seem to be necessary on newer cards
|
1441 |
* that use the I/O port BAR5 window but it doesn't hurt to leave it.
|
1442 |
*/
|
1443 |
enum {
|
1444 |
NV_3D0_NONE = 0,
|
1445 |
NV_3D0_SELECT, |
1446 |
NV_3D0_WINDOW, |
1447 |
NV_3D0_READ, |
1448 |
NV_3D0_WRITE, |
1449 |
}; |
1450 |
|
static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque,
                                           hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;
    uint64_t data = vfio_vga_read(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                                  addr + quirk->data.base_offset, size);

    if (quirk->data.flags == NV_3D0_READ && addr == quirk->data.data_offset) {
        data = vfio_pci_read_config(pdev, quirk->data.address_val, size);
        DPRINTF("%s(0x3d0, %d) = 0x%"PRIx64"\n", __func__, size, data);
    }

    quirk->data.flags = NV_3D0_NONE;

    return data;
}

static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr,
                                        uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;

    switch (quirk->data.flags) {
    case NV_3D0_NONE:
        if (addr == quirk->data.address_offset && data == 0x338) {
            quirk->data.flags = NV_3D0_SELECT;
        }
        break;
    case NV_3D0_SELECT:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.data_offset &&
            (data & ~quirk->data.address_mask) == quirk->data.address_match) {
            quirk->data.flags = NV_3D0_WINDOW;
            quirk->data.address_val = data & quirk->data.address_mask;
        }
        break;
    case NV_3D0_WINDOW:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.address_offset) {
            if (data == 0x538) {
                quirk->data.flags = NV_3D0_READ;
            } else if (data == 0x738) {
                quirk->data.flags = NV_3D0_WRITE;
            }
        }
        break;
    case NV_3D0_WRITE:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.data_offset) {
            vfio_pci_write_config(pdev, quirk->data.address_val, data, size);
            DPRINTF("%s(0x3d0, 0x%"PRIx64", %d)\n", __func__, data, size);
            return;
        }
        break;
    }

    vfio_vga_write(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                   addr + quirk->data.base_offset, data, size);
}

static const MemoryRegionOps vfio_nvidia_3d0_quirk = {
    .read = vfio_nvidia_3d0_quirk_read,
    .write = vfio_nvidia_3d0_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_vga_probe_nvidia_3d0_quirk(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA ||
        !vdev->bars[1].size) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.base_offset = 0x10;
    quirk->data.address_offset = 4;
    quirk->data.address_size = 2;
    quirk->data.address_match = 0x1800;
    quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1;
    quirk->data.data_offset = 0;
    quirk->data.data_size = 4;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_nvidia_3d0_quirk,
                          quirk, "vfio-nvidia-3d0-quirk", 6);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                                quirk->data.base_offset, &quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    DPRINTF("Enabled NVIDIA VGA 0x3d0 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}

/*
 * The second quirk is documented in envytools.  The I/O port BAR5 is just
 * a set of address/data ports to the MMIO BARs.  The BAR we care about is
 * again BAR0.  This backdoor is apparently a bit newer than the one above,
 * so we need to trap not only the 256 bytes @0x1800 but all of PCI config
 * space, including extended space, which is available at the 4k window
 * @0x88000.
 */
enum {
    NV_BAR5_ADDRESS = 0x1,
    NV_BAR5_ENABLE = 0x2,
    NV_BAR5_MASTER = 0x4,
    NV_BAR5_VALID = 0x7,
};

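/* Track the master/enable/address port writes; the generic window code only
 * intercepts data accesses once all three NV_BAR5_VALID bits are set. */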
static void vfio_nvidia_bar5_window_quirk_write(void *opaque, hwaddr addr, |
1569 |
uint64_t data, unsigned size)
|
1570 |
{ |
1571 |
VFIOQuirk *quirk = opaque; |
1572 |
|
1573 |
switch (addr) {
|
1574 |
case 0x0: |
1575 |
if (data & 0x1) { |
1576 |
quirk->data.flags |= NV_BAR5_MASTER; |
1577 |
} else {
|
1578 |
quirk->data.flags &= ~NV_BAR5_MASTER; |
1579 |
} |
1580 |
break;
|
1581 |
case 0x4: |
1582 |
if (data & 0x1) { |
1583 |
quirk->data.flags |= NV_BAR5_ENABLE; |
1584 |
} else {
|
1585 |
quirk->data.flags &= ~NV_BAR5_ENABLE; |
1586 |
} |
1587 |
break;
|
1588 |
case 0x8: |
1589 |
if (quirk->data.flags & NV_BAR5_MASTER) {
|
1590 |
if ((data & ~0xfff) == 0x88000) { |
1591 |
quirk->data.flags |= NV_BAR5_ADDRESS; |
1592 |
quirk->data.address_val = data & 0xfff;
|
1593 |
} else if ((data & ~0xff) == 0x1800) { |
1594 |
quirk->data.flags |= NV_BAR5_ADDRESS; |
1595 |
quirk->data.address_val = data & 0xff;
|
1596 |
} else {
|
1597 |
quirk->data.flags &= ~NV_BAR5_ADDRESS; |
1598 |
} |
1599 |
} |
1600 |
break;
|
1601 |
} |
1602 |
|
1603 |
vfio_generic_window_quirk_write(opaque, addr, data, size); |
1604 |
} |
1605 |
|
1606 |
static const MemoryRegionOps vfio_nvidia_bar5_window_quirk = { |
1607 |
.read = vfio_generic_window_quirk_read, |
1608 |
.write = vfio_nvidia_bar5_window_quirk_write, |
1609 |
.valid.min_access_size = 4,
|
1610 |
.endianness = DEVICE_LITTLE_ENDIAN, |
1611 |
}; |
1612 |
|
1613 |
static void vfio_probe_nvidia_bar5_window_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 5 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.read_flags = quirk->data.write_flags = NV_BAR5_VALID;
    quirk->data.address_offset = 0x8;
    quirk->data.address_size = 0; /* actually 4, but avoids generic code */
    quirk->data.data_offset = 0xc;
    quirk->data.data_size = 4;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev),
                          &vfio_nvidia_bar5_window_quirk, quirk,
                          "vfio-nvidia-bar5-window-quirk", 16);
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem, 0, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled NVIDIA BAR5 window quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}

/*
 * Finally, BAR0 itself.  We want to redirect any accesses to either
 * 0x1800 or 0x88000 through the PCI config space access functions.
 *
 * NB - quirks must be at page granularity or else they don't seem to
 * work when BARs are mmap'd.
 *
 * Here's offset 0x88000...
 */
static void vfio_probe_nvidia_bar0_88000_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 0 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
    quirk->data.address_match = 0x88000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk,
                          quirk, "vfio-nvidia-bar0-88000-quirk",
                          TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                        quirk->data.address_match & TARGET_PAGE_MASK,
                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled NVIDIA BAR0 0x88000 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}

/*
 * And here's the same for BAR0 offset 0x1800...
 */
static void vfio_probe_nvidia_bar0_1800_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 0 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    /* Log the chipset ID */
    DPRINTF("Nvidia NV%02x\n",
            (unsigned int)(vfio_bar_read(&vdev->bars[0], 0, 4) >> 20) & 0xff);

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
    quirk->data.address_match = 0x1800;
    quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, quirk,
                          "vfio-nvidia-bar0-1800-quirk",
                          TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                        quirk->data.address_match & TARGET_PAGE_MASK,
                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled NVIDIA BAR0 0x1800 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}

/*
 * TODO - Some Nvidia devices provide config access to their companion HDA
 * device and even to their parent bridge via these config space mirrors.
 * Add quirks for those regions.
 */

/*
 * Common quirk probe entry points.
 */
static void vfio_vga_quirk_setup(VFIODevice *vdev)
{
    vfio_vga_probe_ati_3c3_quirk(vdev);
    vfio_vga_probe_nvidia_3d0_quirk(vdev);
}

static void vfio_vga_quirk_teardown(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(vdev->vga.region); i++) {
        while (!QLIST_EMPTY(&vdev->vga.region[i].quirks)) {
            VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga.region[i].quirks);
            memory_region_del_subregion(&vdev->vga.region[i].mem, &quirk->mem);
            QLIST_REMOVE(quirk, next);
            g_free(quirk);
        }
    }
}

static void vfio_bar_quirk_setup(VFIODevice *vdev, int nr)
{
    vfio_probe_ati_bar4_window_quirk(vdev, nr);
    vfio_probe_ati_bar2_4000_quirk(vdev, nr);
    vfio_probe_nvidia_bar5_window_quirk(vdev, nr);
    vfio_probe_nvidia_bar0_88000_quirk(vdev, nr);
    vfio_probe_nvidia_bar0_1800_quirk(vdev, nr);
}

static void vfio_bar_quirk_teardown(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    while (!QLIST_EMPTY(&bar->quirks)) {
        VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks);
        memory_region_del_subregion(&bar->mem, &quirk->mem);
        QLIST_REMOVE(quirk, next);
        g_free(quirk);
    }
}

/*
 * PCI config space
 */
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;

    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
    emu_bits = le32_to_cpu(emu_bits);

    if (emu_bits) {
        emu_val = pci_default_read_config(pdev, addr, len);
    }

    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
        ssize_t ret;

        ret = pread(vdev->fd, &phys_val, len, vdev->config_offset + addr);
        if (ret != len) {
            error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x) failed: %m",
                         __func__, vdev->host.domain, vdev->host.bus,
                         vdev->host.slot, vdev->host.function, addr, len);
            return -errno;
        }
        phys_val = le32_to_cpu(phys_val);
    }

    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);

    DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, len=0x%x) %x\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, len, val);

    return val;
}

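/*
 * Illustrative note: emulated_config_bits selects, bit by bit, whether a
 * config read is satisfied from QEMU's emulated config space or from the
 * physical device through the vfio fd.  E.g. the ROM BAR and the MSI/MSI-X
 * capability regions are marked fully emulated in vfio_initfn() below,
 * while a read of an unemulated register such as the vendor ID passes
 * straight through to the host kernel.
 */
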
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint32_t val_le = cpu_to_le32(val);

    DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, 0x%x, len=0x%x)\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->fd, &val_le, len, vdev->config_offset + addr) != len) {
        error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->host.domain, vdev->host.bus,
                     vdev->host.slot, vdev->host.function, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_enable_msi(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_disable_msi(vdev);
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
               ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_enable_msix(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_disable_msix(vdev);
        }
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        DPRINTF("VFIO_UNMAP_DMA: %d\n", -errno);
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    DPRINTF("VFIO_MAP_DMA: %d\n", -errno);
    return -errno;
}

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return !memory_region_is_ram(section->mr);
}

static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            iommu_data.listener);
    hwaddr iova, end;
    void *vaddr;
    int ret;

    assert(!memory_region_is_iommu(section->mr));

    if (vfio_listener_skipped_section(section)) {
        DPRINTF("SKIPPING region_add %"HWADDR_PRIx" - %"PRIx64"\n",
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(section->size) - 1);
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space + int128_get64(section->size)) &
          TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    DPRINTF("region_add %"HWADDR_PRIx" - %"HWADDR_PRIx" [%p]\n",
            iova, end - 1, vaddr);

    memory_region_ref(section->mr);
    ret = vfio_dma_map(container, iova, end - iova, vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, end - iova, vaddr, ret);
    }
}

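/*
 * Illustrative note: only the page-aligned subset of a section is mapped.
 * E.g. with 4K pages, a RAM section covering 0x1000..0x2fff maps as-is,
 * one covering 0x1800..0x2fff is rounded in to 0x2000..0x2fff, and a
 * section smaller than one page is skipped entirely (iova >= end).
 */
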
static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            iommu_data.listener);
    hwaddr iova, end;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        DPRINTF("SKIPPING region_del %"HWADDR_PRIx" - %"PRIx64"\n",
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(section->size) - 1);
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space + int128_get64(section->size)) &
          TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    DPRINTF("region_del %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            iova, end - 1);

    ret = vfio_dma_unmap(container, iova, end - iova);
    memory_region_unref(section->mr);
    if (ret) {
        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx") = %d (%m)",
                     container, iova, end - iova, ret);
    }
}

static MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->iommu_data.listener);
}

/*
 * Interrupt setup
 */
static void vfio_disable_interrupts(VFIODevice *vdev)
{
    switch (vdev->interrupt) {
    case VFIO_INT_INTx:
        vfio_disable_intx(vdev);
        break;
    case VFIO_INT_MSI:
        vfio_disable_msi(vdev);
        break;
    case VFIO_INT_MSIX:
        vfio_disable_msix(vdev);
        break;
    }
}

static int vfio_setup_msi(VFIODevice *vdev, int pos)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;

    if (pread(vdev->fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    DPRINTF("%04x:%02x:%02x.%x PCI MSI CAP @0x%x\n", vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msi_init failed");
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}

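/*
 * Illustrative note: the capability length computed above follows the MSI
 * layouts in the PCI spec, e.g. a 64-bit capable, non-maskable function
 * yields 0xa + 0x4 = 0xe bytes, while a 64-bit maskable one yields
 * 0xa + 0xa + 0x4 = 0x18 bytes.
 */
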
/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to set up MSI-X we need a
 * MemoryRegion for the BAR.  In order to set up the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives.  So we
 * unfortunately split MSI-X setup across two functions.
 */
static int vfio_early_setup_msix(VFIODevice *vdev)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return 0;
    }

    if (pread(vdev->fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }

    if (pread(vdev->fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        return -errno;
    }

    if (pread(vdev->fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        return -errno;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    vdev->msix = g_malloc0(sizeof(*(vdev->msix)));
    vdev->msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    DPRINTF("%04x:%02x:%02x.%x "
            "PCI MSI-X CAP @0x%x, BAR %d, offset 0x%x, entries %d\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, pos, vdev->msix->table_bar,
            vdev->msix->table_offset, vdev->msix->entries);

    return 0;
}

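/*
 * Illustrative note: the low 3 bits of the table and PBA dwords are the
 * BAR indicator (BIR); the rest is a byte offset into that BAR.  E.g. a
 * table dword of 0x2003 decodes as BIR 3, table offset 0x2000, and a
 * QSIZE field of 7 means 8 vectors.
 */
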
static int vfio_setup_msix(VFIODevice *vdev, int pos)
{
    int ret;

    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    &vdev->bars[vdev->msix->table_bar].mem,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    &vdev->bars[vdev->msix->pba_bar].mem,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msix_init failed");
        return ret;
    }

    return 0;
}

static void vfio_teardown_msi(VFIODevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev, &vdev->bars[vdev->msix->table_bar].mem,
                    &vdev->bars[vdev->msix->pba_bar].mem);
    }
}

/*
 * Resource setup
 */
static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        VFIOBAR *bar = &vdev->bars[i];

        if (!bar->size) {
            continue;
        }

        memory_region_set_enabled(&bar->mmap_mem, enabled);
        if (vdev->msix && vdev->msix->table_bar == i) {
            memory_region_set_enabled(&vdev->msix->mmap_mem, enabled);
        }
    }
}

static void vfio_unmap_bar(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    if (!bar->size) {
        return;
    }

    vfio_bar_quirk_teardown(vdev, nr);

    memory_region_del_subregion(&bar->mem, &bar->mmap_mem);
    munmap(bar->mmap, memory_region_size(&bar->mmap_mem));

    if (vdev->msix && vdev->msix->table_bar == nr) {
        memory_region_del_subregion(&bar->mem, &vdev->msix->mmap_mem);
        munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem));
    }

    memory_region_destroy(&bar->mem);
}

static int vfio_mmap_bar(VFIODevice *vdev, VFIOBAR *bar,
                         MemoryRegion *mem, MemoryRegion *submem,
                         void **map, size_t size, off_t offset,
                         const char *name)
{
    int ret = 0;

    if (VFIO_ALLOW_MMAP && size && bar->flags & VFIO_REGION_INFO_FLAG_MMAP) {
        int prot = 0;

        if (bar->flags & VFIO_REGION_INFO_FLAG_READ) {
            prot |= PROT_READ;
        }

        if (bar->flags & VFIO_REGION_INFO_FLAG_WRITE) {
            prot |= PROT_WRITE;
        }

        *map = mmap(NULL, size, prot, MAP_SHARED,
                    bar->fd, bar->fd_offset + offset);
        if (*map == MAP_FAILED) {
            *map = NULL;
            ret = -errno;
            goto empty_region;
        }

        memory_region_init_ram_ptr(submem, OBJECT(vdev), name, size, *map);
    } else {
empty_region:
        /* Create a zero sized sub-region to make cleanup easy. */
        memory_region_init(submem, OBJECT(vdev), name, 0);
    }

    memory_region_add_subregion(mem, offset, submem);

    return ret;
}

static void vfio_map_bar(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    unsigned size = bar->size;
    char name[64];
    uint32_t pci_bar;
    uint8_t type;
    int ret;

    /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
    if (!size) {
        return;
    }

    snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function, nr);

    /* Determine what type of BAR this is for registration */
    ret = pread(vdev->fd, &pci_bar, sizeof(pci_bar),
                vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
    if (ret != sizeof(pci_bar)) {
        error_report("vfio: Failed to read BAR %d (%m)", nr);
        return;
    }

    pci_bar = le32_to_cpu(pci_bar);
    bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
    bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
    type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
                                    ~PCI_BASE_ADDRESS_MEM_MASK);

    /* A "slow" read/write mapping underlies all BARs */
    memory_region_init_io(&bar->mem, OBJECT(vdev), &vfio_bar_ops,
                          bar, name, size);
    pci_register_bar(&vdev->pdev, nr, type, &bar->mem);

    /*
     * We can't mmap areas overlapping the MSIX vector table, so we
     * potentially insert a direct-mapped subregion before and after it.
     */
    if (vdev->msix && vdev->msix->table_bar == nr) {
        size = vdev->msix->table_offset & TARGET_PAGE_MASK;
    }

    strncat(name, " mmap", sizeof(name) - strlen(name) - 1);
    if (vfio_mmap_bar(vdev, bar, &bar->mem,
                      &bar->mmap_mem, &bar->mmap, size, 0, name)) {
        error_report("%s unsupported. Performance may be slow", name);
    }

    if (vdev->msix && vdev->msix->table_bar == nr) {
        unsigned start;

        start = TARGET_PAGE_ALIGN(vdev->msix->table_offset +
                                  (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

        size = start < bar->size ? bar->size - start : 0;
        strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1);
        /* VFIOMSIXInfo contains another MemoryRegion for this mapping */
        if (vfio_mmap_bar(vdev, bar, &bar->mem, &vdev->msix->mmap_mem,
                          &vdev->msix->mmap, size, start, name)) {
            error_report("%s unsupported. Performance may be slow", name);
        }
    }

    vfio_bar_quirk_setup(vdev, nr);
}

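/*
 * Illustrative note: for a BAR holding the MSI-X table, the direct mapping
 * is split around the table pages.  E.g. with 4K pages, a 64K BAR with a
 * 16-entry table at offset 0x3000 gets an mmap for 0x0-0x2fff, leaves
 * 0x3000-0x3fff trapped, and maps 0x4000-0xffff as "msix-hi".
 */
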
static void vfio_map_bars(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_map_bar(vdev, i);
    }

    if (vdev->has_vga) {
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
                              OBJECT(vdev), &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_MEM],
                              "vfio-vga-mmio@0xa0000",
                              QEMU_PCI_VGA_MEM_SIZE);
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
                              OBJECT(vdev), &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_IO_LO],
                              "vfio-vga-io@0x3b0",
                              QEMU_PCI_VGA_IO_LO_SIZE);
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                              OBJECT(vdev), &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                              "vfio-vga-io@0x3c0",
                              QEMU_PCI_VGA_IO_HI_SIZE);

        pci_register_vga(&vdev->pdev, &vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
                         &vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
                         &vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem);
        vfio_vga_quirk_setup(vdev);
    }
}

static void vfio_unmap_bars(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_unmap_bar(vdev, i);
    }

    if (vdev->has_vga) {
        vfio_vga_quirk_teardown(vdev);
        pci_unregister_vga(&vdev->pdev);
        memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem);
        memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem);
        memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem);
    }
}

/*
 * General setup
 */
static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
{
    uint8_t tmp, next = 0xff;

    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
         tmp = pdev->config[tmp + 1]) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

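/*
 * Illustrative note: most standard capabilities don't encode a length, so
 * the size of the cap at 'pos' is taken as the gap to the next-highest cap
 * offset in the chain.  E.g. with caps at 0x40, 0x50 and 0x60, the cap at
 * 0x50 is sized 0x10, and the highest cap runs to the end of standard
 * config space (next stays 0xff).
 */
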
static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
{
    pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
}

static void vfio_add_emulated_word(VFIODevice *vdev, int pos,
                                   uint16_t val, uint16_t mask)
{
    vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
{
    pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
}

static void vfio_add_emulated_long(VFIODevice *vdev, int pos,
                                   uint32_t val, uint32_t mask)
{
    vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static int vfio_setup_pcie_cap(VFIODevice *vdev, int pos, uint8_t size)
{
    uint16_t flags;
    uint8_t type;

    flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
    type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;

    if (type != PCI_EXP_TYPE_ENDPOINT &&
        type != PCI_EXP_TYPE_LEG_END &&
        type != PCI_EXP_TYPE_RC_END) {

        error_report("vfio: Assignment of PCIe type 0x%x "
                     "devices is not currently supported", type);
        return -EINVAL;
    }

    if (!pci_bus_is_express(vdev->pdev.bus)) {
        /*
         * Use express capability as-is on PCI bus.  It doesn't make much
         * sense to even expose, but some drivers (ex. tg3) depend on it
         * and guests don't seem to be particular about it.  We'll need
         * to revisit this or force express devices to express buses if we
         * ever expose an IOMMU to the guest.
         */
    } else if (pci_bus_is_root(vdev->pdev.bus)) {
        /*
         * On a Root Complex bus Endpoints become Root Complex Integrated
         * Endpoints, which changes the type and clears the LNK & LNK2 fields.
         */
        if (type == PCI_EXP_TYPE_ENDPOINT) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_RC_END << 4,
                                   PCI_EXP_FLAGS_TYPE);

            /* Link Capabilities, Status, and Control go away */
            if (size > PCI_EXP_LNKCTL) {
                vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);

#ifndef PCI_EXP_LNKCAP2
#define PCI_EXP_LNKCAP2 44
#endif
#ifndef PCI_EXP_LNKSTA2
#define PCI_EXP_LNKSTA2 50
#endif
                /* Link 2 Capabilities, Status, and Control go away */
                if (size > PCI_EXP_LNKCAP2) {
                    vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
                }
            }

        } else if (type == PCI_EXP_TYPE_LEG_END) {
            /*
             * Legacy endpoints don't belong on the root complex.  Windows
             * seems to be happier with devices if we skip the capability.
             */
            return 0;
        }

    } else {
        /*
         * Convert Root Complex Integrated Endpoints to regular endpoints.
         * These devices don't support LNK/LNK2 capabilities, so make them up.
         */
        if (type == PCI_EXP_TYPE_RC_END) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_ENDPOINT << 4,
                                   PCI_EXP_FLAGS_TYPE);
            vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
                                   PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
            vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
        }

        /* Mark the Link Status bits as emulated to allow virtual negotiation */
        vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
                               pci_get_word(vdev->pdev.config + pos +
                                            PCI_EXP_LNKSTA),
                               PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
    }

    pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size);
    if (pos >= 0) {
        vdev->pdev.exp.exp_cap = pos;
    }

    return pos;
}

static int vfio_add_std_cap(VFIODevice *vdev, uint8_t pos)
{
    PCIDevice *pdev = &vdev->pdev;
    uint8_t cap_id, next, size;
    int ret;

    cap_id = pdev->config[pos];
    next = pdev->config[pos + 1];

    /*
     * If it becomes important to configure capabilities to their actual
     * size, use this as the default when it's something we don't recognize.
     * Since QEMU doesn't actually handle many of the config accesses,
     * exact size doesn't seem worthwhile.
     */
    size = vfio_std_cap_max_size(pdev, pos);

    /*
     * pci_add_capability always inserts the new capability at the head
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we insert from the end by making this recursive.
     * This is also why we pre-calculate size above as cached config space
     * will be changed as we unwind the stack.
     */
    if (next) {
        ret = vfio_add_std_cap(vdev, next);
        if (ret) {
            return ret;
        }
    } else {
        /* Begin the rebuild, use QEMU emulated list bits */
        pdev->config[PCI_CAPABILITY_LIST] = 0;
        vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
        vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
    }

    /* Use emulated next pointer to allow dropping caps */
    pci_set_byte(vdev->emulated_config_bits + pos + 1, 0xff);

    switch (cap_id) {
    case PCI_CAP_ID_MSI:
        ret = vfio_setup_msi(vdev, pos);
        break;
    case PCI_CAP_ID_EXP:
        ret = vfio_setup_pcie_cap(vdev, pos, size);
        break;
    case PCI_CAP_ID_MSIX:
        ret = vfio_setup_msix(vdev, pos);
        break;
    case PCI_CAP_ID_PM:
        vdev->pm_cap = pos;
        /* fall through */
    default:
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    }

    if (ret < 0) {
        error_report("vfio: %04x:%02x:%02x.%x Error adding PCI capability "
                     "0x%x[0x%x]@0x%x: %d", vdev->host.domain,
                     vdev->host.bus, vdev->host.slot, vdev->host.function,
                     cap_id, size, pos, ret);
        return ret;
    }

    return 0;
}

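/*
 * Illustrative note: for a physical chain 0x40 -> 0x50 -> 0x60 the
 * recursion above bottoms out at 0x60 and adds capabilities on the way
 * back (0x60, then 0x50, then 0x40), so head insertion rebuilds the
 * original order.
 */
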
static int vfio_add_capabilities(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
        !pdev->config[PCI_CAPABILITY_LIST]) {
        return 0; /* Nothing to add */
    }

    return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
}

static int vfio_load_rom(VFIODevice *vdev)
{
    uint64_t size = vdev->rom_size;
    char name[32];
    off_t off = 0, voff = vdev->rom_offset;
    ssize_t bytes;
    void *ptr;

    /* If loading ROM from file, pci handles it */
    if (vdev->pdev.romfile || !vdev->pdev.rom_bar || !size) {
        return 0;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    snprintf(name, sizeof(name), "vfio[%04x:%02x:%02x.%x].rom",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);
    memory_region_init_ram(&vdev->pdev.rom, OBJECT(vdev), name, size);
    ptr = memory_region_get_ram_ptr(&vdev->pdev.rom);
    memset(ptr, 0xff, size);

    while (size) {
        bytes = pread(vdev->fd, ptr + off, size, voff + off);
        if (bytes == 0) {
            break; /* expect that we could get back less than the ROM BAR */
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            memory_region_destroy(&vdev->pdev.rom);
            return -errno;
        }
    }

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT, 0, &vdev->pdev.rom);
    vdev->pdev.has_rom = true;
    return 0;
}

static int vfio_connect_container(VFIOGroup *group)
{
    VFIOContainer *container;
    int ret, fd;

    if (group->container) {
        return 0;
    }

    QLIST_FOREACH(container, &container_list, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_report("vfio: failed to open /dev/vfio/vfio: %m");
        return -errno;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_report("vfio: supported vfio version: %d, "
                     "reported version: %d", VFIO_API_VERSION, ret);
        close(fd);
        return -EINVAL;
    }

    container = g_malloc0(sizeof(*container));
    container->fd = fd;

    if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_report("vfio: failed to set group container: %m");
            g_free(container);
            close(fd);
            return -errno;
        }

        ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
        if (ret) {
            error_report("vfio: failed to set iommu for container: %m");
            g_free(container);
            close(fd);
            return -errno;
        }

        container->iommu_data.listener = vfio_memory_listener;
        container->iommu_data.release = vfio_listener_release;

        memory_listener_register(&container->iommu_data.listener,
                                 &address_space_memory);
    } else {
        error_report("vfio: No available IOMMU models");
        g_free(container);
        close(fd);
        return -EINVAL;
    }

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&container_list, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    return 0;
}

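/*
 * Illustrative note: the userspace sequence above mirrors the kernel's
 * VFIO contract - open /dev/vfio/vfio, verify VFIO_GET_API_VERSION, attach
 * the group with VFIO_GROUP_SET_CONTAINER, then pick an IOMMU model with
 * VFIO_SET_IOMMU before any VFIO_IOMMU_MAP_DMA calls.  Groups that attach
 * to an existing container share its DMA mappings.
 */
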
static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    if (QLIST_EMPTY(&container->group_list)) {
        if (container->iommu_data.release) {
            container->iommu_data.release(container);
        }
        QLIST_REMOVE(container, next);
        DPRINTF("vfio_disconnect_container: close container->fd\n");
        close(container->fd);
        g_free(container);
    }
}

static VFIOGroup *vfio_get_group(int groupid)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &group_list, next) {
        if (group->groupid == groupid) {
            return group;
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_report("vfio: error opening %s: %m", path);
        g_free(group);
        return NULL;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_report("vfio: error getting group status: %m");
        close(group->fd);
        g_free(group);
        return NULL;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_report("vfio: error, group %d is not viable, please ensure "
                     "all devices within the iommu_group are bound to their "
                     "vfio bus driver.", groupid);
        close(group->fd);
        g_free(group);
        return NULL;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group)) {
        error_report("vfio: failed to setup container for group %d", groupid);
        close(group->fd);
        g_free(group);
        return NULL;
    }

    QLIST_INSERT_HEAD(&group_list, group, next);

    return group;
}

static void vfio_put_group(VFIOGroup *group)
{
    if (!QLIST_EMPTY(&group->device_list)) {
        return;
    }

    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    DPRINTF("vfio_put_group: close group->fd\n");
    close(group->fd);
    g_free(group);
}

static int vfio_get_device(VFIOGroup *group, const char *name, VFIODevice *vdev)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
    int ret, i;

    ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (ret < 0) {
        error_report("vfio: error getting device %s from group %d: %m",
                     name, group->groupid);
        error_printf("Verify all devices in group %d are bound to vfio-pci "
                     "or pci-stub and not already in use\n", group->groupid);
        return ret;
    }

    vdev->fd = ret;
    vdev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vdev, next);

    /* Sanity check device */
    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_report("vfio: error getting device info: %m");
        goto error;
    }

    DPRINTF("Device %s flags: %u, regions: %u, irqs: %u\n", name,
            dev_info.flags, dev_info.num_regions, dev_info.num_irqs);

    if (!(dev_info.flags & VFIO_DEVICE_FLAGS_PCI)) {
        error_report("vfio: Um, this isn't a PCI device");
        goto error;
    }

    vdev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    if (!vdev->reset_works) {
        error_report("Warning, device %s does not support reset", name);
    }

    if (dev_info.num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
        error_report("vfio: unexpected number of io regions %u",
                     dev_info.num_regions);
        goto error;
    }

    if (dev_info.num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
        error_report("vfio: unexpected number of irqs %u", dev_info.num_irqs);
        goto error;
    }

    for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
        reg_info.index = i;

        ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
        if (ret) {
            error_report("vfio: Error getting region %d info: %m", i);
            goto error;
        }

        DPRINTF("Device %s region %d:\n", name, i);
        DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
                (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
                (unsigned long)reg_info.flags);

        vdev->bars[i].flags = reg_info.flags;
        vdev->bars[i].size = reg_info.size;
        vdev->bars[i].fd_offset = reg_info.offset;
        vdev->bars[i].fd = vdev->fd;
        vdev->bars[i].nr = i;
        QLIST_INIT(&vdev->bars[i].quirks);
    }

    reg_info.index = VFIO_PCI_ROM_REGION_INDEX;

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
    if (ret) {
        error_report("vfio: Error getting ROM info: %m");
        goto error;
    }

    DPRINTF("Device %s ROM:\n", name);
    DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
            (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
            (unsigned long)reg_info.flags);

    vdev->rom_size = reg_info.size;
    vdev->rom_offset = reg_info.offset;

    reg_info.index = VFIO_PCI_CONFIG_REGION_INDEX;

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
    if (ret) {
        error_report("vfio: Error getting config info: %m");
        goto error;
    }

    DPRINTF("Device %s config:\n", name);
    DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
            (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
            (unsigned long)reg_info.flags);

    vdev->config_size = reg_info.size;
    if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
        vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }
    vdev->config_offset = reg_info.offset;

    if ((vdev->features & VFIO_FEATURE_ENABLE_VGA) &&
        dev_info.num_regions > VFIO_PCI_VGA_REGION_INDEX) {
        struct vfio_region_info vga_info = {
            .argsz = sizeof(vga_info),
            .index = VFIO_PCI_VGA_REGION_INDEX,
        };

        ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &vga_info);
        if (ret) {
            error_report(
                "vfio: Device does not support requested feature x-vga");
            goto error;
        }

        if (!(vga_info.flags & VFIO_REGION_INFO_FLAG_READ) ||
            !(vga_info.flags & VFIO_REGION_INFO_FLAG_WRITE) ||
            vga_info.size < 0xbffff + 1) {
            error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx",
                         (unsigned long)vga_info.flags,
                         (unsigned long)vga_info.size);
            goto error;
        }

        vdev->vga.fd_offset = vga_info.offset;
        vdev->vga.fd = vdev->fd;

        vdev->vga.region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
        vdev->vga.region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_MEM].quirks);

        vdev->vga.region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
        vdev->vga.region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].quirks);

        vdev->vga.region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
        vdev->vga.region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks);

        vdev->has_vga = true;
    }

error:
    if (ret) {
        QLIST_REMOVE(vdev, next);
        vdev->group = NULL;
        close(vdev->fd);
    }
    return ret;
}

static void vfio_put_device(VFIODevice *vdev)
{
    QLIST_REMOVE(vdev, next);
    vdev->group = NULL;
    DPRINTF("vfio_put_device: close vdev->fd\n");
    close(vdev->fd);
    if (vdev->msix) {
        g_free(vdev->msix);
        vdev->msix = NULL;
    }
}

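/*
 * Illustrative note on the init path below (example values only): the
 * device's iommu group is discovered by following the sysfs link, e.g.
 * /sys/bus/pci/devices/0000:06:0d.0/iommu_group -> .../iommu_groups/26
 * gives group 26, which is then opened as /dev/vfio/26 by vfio_get_group().
 */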
static int vfio_initfn(PCIDevice *pdev)
{
    VFIODevice *pvdev, *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOGroup *group;
    char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

    /* Check that the host device exists */
    snprintf(path, sizeof(path),
             "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);
    if (stat(path, &st) < 0) {
        error_report("vfio: error: no such host device: %s", path);
        return -errno;
    }

    strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1);

    len = readlink(path, iommu_group_path, PATH_MAX);
    if (len <= 0) {
        error_report("vfio: error no iommu_group for device");
        return -errno;
    }

    iommu_group_path[len] = 0;
    group_name = basename(iommu_group_path);

    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_report("vfio: error reading %s: %m", path);
        return -errno;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) group %d\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, groupid);

    group = vfio_get_group(groupid);
    if (!group) {
        error_report("vfio: failed to get group %d", groupid);
        return -ENOENT;
    }

    snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);

    QLIST_FOREACH(pvdev, &group->device_list, next) {
        if (pvdev->host.domain == vdev->host.domain &&
            pvdev->host.bus == vdev->host.bus &&
            pvdev->host.slot == vdev->host.slot &&
            pvdev->host.function == vdev->host.function) {

            error_report("vfio: error: device %s is already attached", path);
            vfio_put_group(group);
            return -EBUSY;
        }
    }

    ret = vfio_get_device(group, path, vdev);
    if (ret) {
        error_report("vfio: failed to get device %s", path);
        vfio_put_group(group);
        return ret;
    }

    /* Get a copy of config space */
    ret = pread(vdev->fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_report("vfio: Failed to read device config space");
        goto out_put;
    }

    /* vfio emulates a lot for us, but some bits need extra love */
    vdev->emulated_config_bits = g_malloc0(vdev->config_size);

    /* QEMU can choose to expose the ROM or not */
    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);

    /* QEMU can change multi-function devices to single function, or reverse */
    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
                                              PCI_HEADER_TYPE_MULTI_FUNCTION;

    /*
     * Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here.
     */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);

    vfio_load_rom(vdev);

    ret = vfio_early_setup_msix(vdev);
    if (ret) {
        goto out_put;
    }

    vfio_map_bars(vdev);

    ret = vfio_add_capabilities(vdev);
    if (ret) {
        goto out_teardown;
    }

    /* QEMU emulates all of MSI & MSIX */
    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
               MSIX_CAP_LENGTH);
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
               vdev->msi_cap_size);
    }

    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = qemu_new_timer_ms(vm_clock,
                                                  vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq);
        ret = vfio_enable_intx(vdev);
        if (ret) {
            goto out_teardown;
        }
    }

    add_boot_device_path(vdev->bootindex, &pdev->qdev, NULL);

    return 0;

out_teardown:
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_teardown_msi(vdev);
    vfio_unmap_bars(vdev);
out_put:
    g_free(vdev->emulated_config_bits);
    vfio_put_device(vdev);
    vfio_put_group(group);
    return ret;
}

static void vfio_exitfn(PCIDevice *pdev)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOGroup *group = vdev->group;

    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_disable_interrupts(vdev);
    if (vdev->intx.mmap_timer) {
        qemu_free_timer(vdev->intx.mmap_timer);
    }
    vfio_teardown_msi(vdev);
    vfio_unmap_bars(vdev);
    g_free(vdev->emulated_config_bits);
    vfio_put_device(vdev);
    vfio_put_group(group);
}

static void vfio_pci_reset(DeviceState *dev)
{
    PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint16_t cmd;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    vfio_disable_interrupts(vdev);

    /* Make sure the device is in D0 */
    if (vdev->pm_cap) {
        uint16_t pmcsr;
        uint8_t state;

        pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
        state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (state) {
            pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
            vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
            /* vfio handles the necessary delay here */
            pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
            state = pmcsr & PCI_PM_CTRL_STATE_MASK;
            if (state) {
                error_report("vfio: Unable to power on device, stuck in D%d",
                             state);
            }
        }
    }

    /*
     * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
     * Also put INTx Disable in known state.
     */
    cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
    cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
             PCI_COMMAND_INTX_DISABLE);
    vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);

    if (vdev->reset_works) {
        if (ioctl(vdev->fd, VFIO_DEVICE_RESET)) {
            error_report("vfio: Error unable to reset physical device "
                         "(%04x:%02x:%02x.%x): %m", vdev->host.domain,
                         vdev->host.bus, vdev->host.slot, vdev->host.function);
        }
    }

    vfio_enable_intx(vdev);
}

static Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIODevice, host),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIODevice,
                       intx.mmap_timeout, 1100),
    DEFINE_PROP_BIT("x-vga", VFIODevice, features,
                    VFIO_FEATURE_ENABLE_VGA_BIT, false),
    DEFINE_PROP_INT32("bootindex", VFIODevice, bootindex, -1),
    /*
     * TODO - support passed fds... is this necessary?
     * DEFINE_PROP_STRING("vfiofd", VFIODevice, vfiofd_name),
     * DEFINE_PROP_STRING("vfiogroupfd", VFIODevice, vfiogroupfd_name),
     */
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vfio_pci_vmstate = {
    .name = "vfio-pci",
    .unmigratable = 1,
};

static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    dc->reset = vfio_pci_reset;
    dc->props = vfio_pci_dev_properties;
    dc->vmsd = &vfio_pci_vmstate;
    dc->desc = "VFIO-based PCI device assignment";
    pdc->init = vfio_initfn;
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
    pdc->is_express = 1; /* We might be */
}

static const TypeInfo vfio_pci_dev_info = {
    .name = "vfio-pci",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VFIODevice),
    .class_init = vfio_pci_dev_class_init,
};

static void register_vfio_pci_dev_type(void)
{
    type_register_static(&vfio_pci_dev_info);
}

type_init(register_vfio_pci_dev_type)
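
/*
 * Illustrative usage note (example address only): with the device bound to
 * vfio-pci on the host, the type registered above is instantiated on the
 * QEMU command line as, e.g., -device vfio-pci,host=0000:06:0d.0,
 * optionally with x-vga=on or bootindex=N per the properties declared above.
 */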