hw/misc/vfio.c @ ba25df88
/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include <dirent.h>
#include <linux/vfio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include "config.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
#include "qemu/queue.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"

/* #define DEBUG_VFIO */
#ifdef DEBUG_VFIO
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "vfio: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* Extra debugging, trap acceleration paths for more logging */
#define VFIO_ALLOW_MMAP 1
#define VFIO_ALLOW_KVM_INTX 1

struct VFIODevice;

typedef struct VFIOQuirk {
    MemoryRegion mem;
    struct VFIODevice *vdev;
    QLIST_ENTRY(VFIOQuirk) next;
    uint32_t data;
    uint32_t data2;
} VFIOQuirk;

typedef struct VFIOBAR {
    off_t fd_offset; /* offset of BAR within device fd */
    int fd; /* device fd, allows us to pass VFIOBAR as opaque data */
    MemoryRegion mem; /* slow, read/write access */
    MemoryRegion mmap_mem; /* direct mapped access */
    void *mmap;
    size_t size;
    uint32_t flags; /* VFIO region flags (rd/wr/mmap) */
    uint8_t nr; /* cache the BAR number for debug */
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOBAR;

typedef struct VFIOVGARegion {
    MemoryRegion mem;
    off_t offset;
    int nr;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOVGARegion;

typedef struct VFIOVGA {
    off_t fd_offset;
    int fd;
    VFIOVGARegion region[QEMU_PCI_VGA_NUM_REGIONS];
} VFIOVGA;

typedef struct VFIOINTx {
    bool pending; /* interrupt pending */
    bool kvm_accel; /* set when QEMU bypass through KVM enabled */
    uint8_t pin; /* which pin to pull for qemu_set_irq */
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    EventNotifier unmask; /* eventfd for unmask on QEMU bypass */
    PCIINTxRoute route; /* routing info for QEMU bypass */
    uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */
    QEMUTimer *mmap_timer; /* enable mmaps after periods w/o interrupts */
} VFIOINTx;

typedef struct VFIOMSIVector {
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    struct VFIODevice *vdev; /* back pointer to device */
    int virq; /* KVM irqchip route for QEMU bypass */
    bool use;
} VFIOMSIVector;

enum {
    VFIO_INT_NONE = 0,
    VFIO_INT_INTx = 1,
    VFIO_INT_MSI = 2,
    VFIO_INT_MSIX = 3,
};

struct VFIOGroup;

typedef struct VFIOContainer {
    int fd; /* /dev/vfio/vfio, empowered by the attached groups */
    struct {
        /* enable abstraction to support various iommu backends */
        union {
            MemoryListener listener; /* Used by type1 iommu */
        };
        void (*release)(struct VFIOContainer *);
    } iommu_data;
    QLIST_HEAD(, VFIOGroup) group_list;
    QLIST_ENTRY(VFIOContainer) next;
} VFIOContainer;

/* Cache of MSI-X setup plus extra mmap and memory region for split BAR map */
typedef struct VFIOMSIXInfo {
    uint8_t table_bar;
    uint8_t pba_bar;
    uint16_t entries;
    uint32_t table_offset;
    uint32_t pba_offset;
    MemoryRegion mmap_mem;
    void *mmap;
} VFIOMSIXInfo;

typedef struct VFIODevice {
    PCIDevice pdev;
    int fd;
    VFIOINTx intx;
    unsigned int config_size;
    uint8_t *emulated_config_bits; /* QEMU emulated bits, little-endian */
    off_t config_offset; /* Offset of config space region within device fd */
    unsigned int rom_size;
    off_t rom_offset; /* Offset of ROM region within device fd */
    int msi_cap_size;
    VFIOMSIVector *msi_vectors;
    VFIOMSIXInfo *msix;
    int nr_vectors; /* Number of MSI/MSIX vectors currently in use */
    int interrupt; /* Current interrupt type */
    VFIOBAR bars[PCI_NUM_REGIONS - 1]; /* No ROM */
    VFIOVGA vga; /* 0xa0000, 0x3b0, 0x3c0 */
    PCIHostDeviceAddress host;
    QLIST_ENTRY(VFIODevice) next;
    struct VFIOGroup *group;
    uint32_t features;
#define VFIO_FEATURE_ENABLE_VGA_BIT 0
#define VFIO_FEATURE_ENABLE_VGA (1 << VFIO_FEATURE_ENABLE_VGA_BIT)
    int32_t bootindex;
    uint8_t pm_cap;
    bool reset_works;
    bool has_vga;
} VFIODevice;

typedef struct VFIOGroup {
    int fd;
    int groupid;
    VFIOContainer *container;
    QLIST_HEAD(, VFIODevice) device_list;
    QLIST_ENTRY(VFIOGroup) next;
    QLIST_ENTRY(VFIOGroup) container_next;
} VFIOGroup;

#define MSIX_CAP_LENGTH 12

static QLIST_HEAD(, VFIOContainer)
    container_list = QLIST_HEAD_INITIALIZER(container_list);

static QLIST_HEAD(, VFIOGroup)
    group_list = QLIST_HEAD_INITIALIZER(group_list);

static void vfio_disable_interrupts(VFIODevice *vdev);
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len);
static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled);

/*
 * Common VFIO interrupt disable
 */
static void vfio_disable_irqindex(VFIODevice *vdev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

/*
 * INTx
 */
static void vfio_unmask_intx(VFIODevice *vdev)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

#ifdef CONFIG_KVM /* Unused outside of CONFIG_KVM code */
static void vfio_mask_intx(VFIODevice *vdev)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
#endif

/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead. We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead). If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance. This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps. Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode. We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled. Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (vdev->intx.pending) {
        qemu_mod_timer(vdev->intx.mmap_timer,
                       qemu_get_clock_ms(vm_clock) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}

static void vfio_intx_interrupt(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) Pin %c\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function,
            'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 1);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        qemu_mod_timer(vdev->intx.mmap_timer,
                       qemu_get_clock_ms(vm_clock) + vdev->intx.mmap_timeout);
    }
}

static void vfio_eoi(VFIODevice *vdev)
{
    if (!vdev->intx.pending) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) EOI\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
    vfio_unmask_intx(vdev);
}

static void vfio_enable_intx_kvm(VFIODevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    struct vfio_irq_set *irq_set;
    int ret, argsz;
    int32_t *pfd;

    if (!VFIO_ALLOW_KVM_INTX || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_intx(vdev);
    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_report("vfio: Error: event_notifier_init failed eoi");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to setup resample irqfd: %m");
        goto fail_irqfd;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = irqfd.resamplefd;

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
        goto fail_vfio;
    }

    /* Let'em rip */
    vfio_unmask_intx(vdev);

    vdev->intx.kvm_accel = true;

    DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel enabled\n",
            __func__, vdev->host.domain, vdev->host.bus,
            vdev->host.slot, vdev->host.function);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_intx(vdev);
#endif
}

static void vfio_disable_intx_kvm(VFIODevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_intx(vdev);
    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_intx(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel disabled\n",
            __func__, vdev->host.domain, vdev->host.bus,
            vdev->host.slot, vdev->host.function);
#endif
}

static void vfio_update_irq(PCIDevice *pdev)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) IRQ moved %d -> %d\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, vdev->intx.route.irq, route.irq);

    vfio_disable_intx_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_enable_intx_kvm(vdev);

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_eoi(vdev);
}

static int vfio_enable_intx(VFIODevice *vdev)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    int ret, argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() &&
        kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_report("vfio: Error: event_notifier_init failed");
        return ret;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx fd: %m");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }

    vfio_enable_intx_kvm(vdev);

    vdev->interrupt = VFIO_INT_INTx;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    return 0;
}

static void vfio_disable_intx(VFIODevice *vdev)
{
    int fd;

    qemu_del_timer(vdev->intx.mmap_timer);
    vfio_disable_intx_kvm(vdev);
    vfio_disable_irqindex(vdev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}

/*
 * MSI/X
 */
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIODevice *vdev = vector->vdev;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    if (vdev->interrupt == VFIO_INT_MSIX) {
        msix_notify(&vdev->pdev, nr);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        msi_notify(&vdev->pdev, nr);
    } else {
        error_report("vfio: MSI interrupt received, but not enabled?");
    }
}
571 |
{ |
572 |
struct vfio_irq_set *irq_set;
|
573 |
int ret = 0, i, argsz; |
574 |
int32_t *fds; |
575 |
|
576 |
argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds)); |
577 |
|
578 |
irq_set = g_malloc0(argsz); |
579 |
irq_set->argsz = argsz; |
580 |
irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER; |
581 |
irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX; |
582 |
irq_set->start = 0;
|
583 |
irq_set->count = vdev->nr_vectors; |
584 |
fds = (int32_t *)&irq_set->data; |
585 |
|
586 |
for (i = 0; i < vdev->nr_vectors; i++) { |
587 |
if (!vdev->msi_vectors[i].use) {
|
588 |
fds[i] = -1;
|
589 |
continue;
|
590 |
} |
591 |
|
592 |
fds[i] = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt); |
593 |
} |
594 |
|
595 |
ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set); |
596 |
|
597 |
g_free(irq_set); |
598 |
|
599 |
return ret;
|
600 |
} |
601 |
|
602 |
static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr, |
603 |
MSIMessage *msg, IOHandler *handler) |
604 |
{ |
605 |
VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev); |
606 |
VFIOMSIVector *vector; |
607 |
int ret;
|
608 |
|
609 |
DPRINTF("%s(%04x:%02x:%02x.%x) vector %d used\n", __func__,
|
610 |
vdev->host.domain, vdev->host.bus, vdev->host.slot, |
611 |
vdev->host.function, nr); |
612 |
|
613 |
vector = &vdev->msi_vectors[nr]; |
614 |
vector->vdev = vdev; |
615 |
vector->use = true;
|
616 |
|
617 |
msix_vector_use(pdev, nr); |
618 |
|
619 |
if (event_notifier_init(&vector->interrupt, 0)) { |
620 |
error_report("vfio: Error: event_notifier_init failed");
|
621 |
} |
622 |
|
623 |
/*
|
624 |
* Attempt to enable route through KVM irqchip,
|
625 |
* default to userspace handling if unavailable.
|
626 |
*/
|
627 |
vector->virq = msg ? kvm_irqchip_add_msi_route(kvm_state, *msg) : -1;
|
628 |
if (vector->virq < 0 || |
629 |
kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt, |
630 |
vector->virq) < 0) {
|
631 |
if (vector->virq >= 0) { |
632 |
kvm_irqchip_release_virq(kvm_state, vector->virq); |
633 |
vector->virq = -1;
|
634 |
} |
635 |
qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), |
636 |
handler, NULL, vector);
|
637 |
} |
638 |
|
639 |
/*
|
640 |
* We don't want to have the host allocate all possible MSI vectors
|
641 |
* for a device if they're not in use, so we shutdown and incrementally
|
642 |
* increase them as needed.
|
643 |
*/
|
644 |
if (vdev->nr_vectors < nr + 1) { |
645 |
vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX); |
646 |
vdev->nr_vectors = nr + 1;
|
647 |
ret = vfio_enable_vectors(vdev, true);
|
648 |
if (ret) {
|
649 |
error_report("vfio: failed to enable vectors, %d", ret);
|
650 |
} |
651 |
} else {
|
652 |
int argsz;
|
653 |
struct vfio_irq_set *irq_set;
|
654 |
int32_t *pfd; |
655 |
|
656 |
argsz = sizeof(*irq_set) + sizeof(*pfd); |
657 |
|
658 |
irq_set = g_malloc0(argsz); |
659 |
irq_set->argsz = argsz; |
660 |
irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | |
661 |
VFIO_IRQ_SET_ACTION_TRIGGER; |
662 |
irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX; |
663 |
irq_set->start = nr; |
664 |
irq_set->count = 1;
|
665 |
pfd = (int32_t *)&irq_set->data; |
666 |
|
667 |
*pfd = event_notifier_get_fd(&vector->interrupt); |
668 |
|
669 |
ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set); |
670 |
g_free(irq_set); |
671 |
if (ret) {
|
672 |
error_report("vfio: failed to modify vector, %d", ret);
|
673 |
} |
674 |
} |
675 |
|
676 |
return 0; |
677 |
} |
678 |
|

static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}

static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d released\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    /*
     * XXX What's the right thing to do here? This turns off the interrupt
     * completely, but do we really just want to switch the interrupt to
     * bouncing through userspace and let msix.c drop it? Not sure.
     */
    msix_vector_unuse(pdev, nr);

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
    irq_set->start = nr;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = -1;

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    if (vector->virq < 0) {
        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            NULL, NULL, NULL);
    } else {
        kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt,
                                          vector->virq);
        kvm_irqchip_release_virq(kvm_state, vector->virq);
        vector->virq = -1;
    }

    event_notifier_cleanup(&vector->interrupt);
    vector->use = false;
}

static void vfio_enable_msix(VFIODevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_malloc0(vdev->msix->entries * sizeof(VFIOMSIVector));

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host. When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked. We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix(). This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}

static void vfio_enable_msi(VFIODevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector));

    for (i = 0; i < vdev->nr_vectors; i++) {
        MSIMessage msg;
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        vector->vdev = vdev;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        msg = msi_get_message(&vdev->pdev, i);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vector->virq = kvm_irqchip_add_msi_route(kvm_state, msg);
        if (vector->virq < 0 ||
            kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
                                           vector->virq) < 0) {
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                vfio_msi_interrupt, NULL, vector);
        }
    }

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt,
                                                  vector->virq);
                kvm_irqchip_release_virq(kvm_state, vector->virq);
                vector->virq = -1;
            } else {
                qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                    NULL, NULL, NULL);
            }
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        return;
    }

    vdev->interrupt = VFIO_INT_MSI;

    DPRINTF("%s(%04x:%02x:%02x.%x) Enabled %d MSI vectors\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, vdev->nr_vectors);
}

static void vfio_disable_msi_common(VFIODevice *vdev)
{
    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_enable_intx(vdev);
}

static void vfio_disable_msix(VFIODevice *vdev)
{
    msix_unset_vector_notifiers(&vdev->pdev);

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_disable_msi_common(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}

static void vfio_disable_msi(VFIODevice *vdev)
{
    int i;

    vfio_disable_irqindex(vdev, VFIO_PCI_MSI_IRQ_INDEX);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        if (!vector->use) {
            continue;
        }

        if (vector->virq >= 0) {
            kvm_irqchip_remove_irqfd_notifier(kvm_state,
                                              &vector->interrupt, vector->virq);
            kvm_irqchip_release_virq(kvm_state, vector->virq);
            vector->virq = -1;
        } else {
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
        }

        event_notifier_cleanup(&vector->interrupt);
    }

    vfio_disable_msi_common(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}

/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
static void vfio_bar_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOBAR *bar = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes\n", size);
        break;
    }

    if (pwrite(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, addr, data, size);
    }

#ifdef DEBUG_VFIO
    {
        VFIODevice *vdev = container_of(bar, VFIODevice, bars[bar->nr]);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                ", %d)\n", __func__, vdev->host.domain, vdev->host.bus,
                vdev->host.slot, vdev->host.function, bar->nr, addr,
                data, size);
    }
#endif

    /*
     * A read or write to a BAR always signals an INTx EOI. This will
     * do nothing if not pending (including not in INTx mode). We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt. Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));
}

static uint64_t vfio_bar_read(void *opaque,
                              hwaddr addr, unsigned size)
{
    VFIOBAR *bar = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

#ifdef DEBUG_VFIO
    {
        VFIODevice *vdev = container_of(bar, VFIODevice, bars[bar->nr]);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx
                ", %d) = 0x%"PRIx64"\n", __func__, vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function,
                bar->nr, addr, size, data);
    }
#endif

    /* Same as write above */
    vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));

    return data;
}

static const MemoryRegionOps vfio_bar_ops = {
    .read = vfio_bar_read,
    .write = vfio_bar_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_vga_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes\n", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    DPRINTF("%s(0x%"HWADDR_PRIx", 0x%"PRIx64", %d)\n",
            __func__, region->offset + addr, data, size);
}

static uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    DPRINTF("%s(0x%"HWADDR_PRIx", %d) = 0x%"PRIx64"\n",
            __func__, region->offset + addr, size, data);

    return data;
}

static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * Device specific quirks
 */

#define PCI_VENDOR_ID_ATI 0x1002

/*
 * Device 1002:68f9 (Advanced Micro Devices [AMD] nee ATI Cedar PRO [Radeon
 * HD 5450/6350]) reports the upper byte of the physical address of the
 * I/O port BAR4 through VGA register 0x3c3. The BAR is 256 bytes, so the
 * lower byte is known to be zero. Probing for this quirk reads 0xff from
 * port 0x3c3 on some devices so we store the physical address and replace
 * reads with the virtual address any time it matches. XXX Research when
 * to enable quirk.
 */
static uint64_t vfio_ati_3c3_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;
    uint64_t data = vfio_vga_read(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                                  addr + 0x3, size);

    if (data == quirk->data) {
        data = pci_get_byte(pdev->config + PCI_BASE_ADDRESS_4 + 1);
        DPRINTF("%s(0x3c3, 1) = 0x%"PRIx64"\n", __func__, data);
    }

    return data;
}

static const MemoryRegionOps vfio_ati_3c3_quirk = {
    .read = vfio_ati_3c3_quirk_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_vga_probe_ati_3c3_quirk(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    off_t physoffset = vdev->config_offset + PCI_BASE_ADDRESS_4;
    uint32_t physbar;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI ||
        vdev->bars[4].size < 256) {
        return;
    }

    /* Get I/O port BAR physical address */
    if (pread(vdev->fd, &physbar, 4, physoffset) != 4) {
        error_report("vfio: probe failed for ATI/AMD 0x3c3 quirk on device "
                     "%04x:%02x:%02x.%x", vdev->host.domain,
                     vdev->host.bus, vdev->host.slot, vdev->host.function);
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data = (physbar >> 8) & 0xff;

    memory_region_init_io(&quirk->mem, &vfio_ati_3c3_quirk, quirk,
                          "vfio-ati-3c3-quirk", 1);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem, 3,
                                &quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    DPRINTF("Enabled ATI/AMD quirk 0x3c3 for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}

/*
 * Device 1002:68f9 (Advanced Micro Devices [AMD] nee ATI Cedar PRO [Radeon
 * HD 5450/6350]) reports the physical address of MMIO BAR0 through a
 * write/read operation on I/O port BAR4. When uint32_t 0x4010 is written
 * to offset 0x0, the subsequent read from offset 0x4 returns the contents
 * of BAR0. Test for this quirk on all ATI/AMD devices. XXX - Note that
 * 0x10 is the offset of BAR0 in config space, is this a window to all of
 * config space?
 */
static uint64_t vfio_ati_4010_quirk_read(void *opaque,
                                         hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;
    uint64_t data = vfio_bar_read(&vdev->bars[4], addr, size);

    if (addr == 4 && size == 4 && quirk->data) {
        data = pci_get_long(pdev->config + PCI_BASE_ADDRESS_0);
        DPRINTF("%s(BAR4+0x4) = 0x%"PRIx64"\n", __func__, data);
    }

    quirk->data = 0;

    return data;
}

static void vfio_ati_4010_quirk_write(void *opaque, hwaddr addr,
                                      uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;

    vfio_bar_write(&vdev->bars[4], addr, data, size);

    quirk->data = (addr == 0 && size == 4 && data == 0x4010) ? 1 : 0;
}

static const MemoryRegionOps vfio_ati_4010_quirk = {
    .read = vfio_ati_4010_quirk_read,
    .write = vfio_ati_4010_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_probe_ati_4010_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    off_t physoffset = vdev->config_offset + PCI_BASE_ADDRESS_0;
    uint32_t physbar0;
    uint64_t data;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 4 || !vdev->bars[0].size ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    /* Get I/O port BAR physical address */
    if (pread(vdev->fd, &physbar0, 4, physoffset) != 4) {
        error_report("vfio: probe failed for ATI/AMD 0x4010 quirk on device "
                     "%04x:%02x:%02x.%x", vdev->host.domain,
                     vdev->host.bus, vdev->host.slot, vdev->host.function);
        return;
    }

    /* Write 0x4010 to I/O port BAR offset 0 */
    vfio_bar_write(&vdev->bars[4], 0, 0x4010, 4);
    /* Read back result */
    data = vfio_bar_read(&vdev->bars[4], 4, 4);

    /* If the register matches the physical address of BAR0, we need a quirk */
    if (data != physbar0) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;

    memory_region_init_io(&quirk->mem, &vfio_ati_4010_quirk, quirk,
                          "vfio-ati-4010-quirk", 8);
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem, 0, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled ATI/AMD quirk 0x4010 for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}

/*
 * Device 1002:5b63 (Advanced Micro Devices [AMD] nee ATI RV370 [Radeon X550])
 * retrieves the upper half of the MMIO BAR0 physical address by writing
 * 0xf10 to I/O port BAR1 offset 0 and reading the result from offset 6.
 * XXX - 0x10 is the offset of BAR0 in PCI config space, this could provide
 * full access to config space. Config space is little endian, so the data
 * register probably starts at 0x4.
 */
static uint64_t vfio_ati_f10_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;
    uint64_t data = vfio_bar_read(&vdev->bars[1], addr, size);

    if (addr == 6 && size == 2 && quirk->data) {
        data = pci_get_word(pdev->config + PCI_BASE_ADDRESS_0 + 2);
        DPRINTF("%s(BAR1+0x6) = 0x%"PRIx64"\n", __func__, data);
    }

    quirk->data = 0;

    return data;
}

static void vfio_ati_f10_quirk_write(void *opaque, hwaddr addr,
                                     uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;

    vfio_bar_write(&vdev->bars[1], addr, data, size);

    quirk->data = (addr == 0 && size == 4 && data == 0xf10) ? 1 : 0;
}

static const MemoryRegionOps vfio_ati_f10_quirk = {
    .read = vfio_ati_f10_quirk_read,
    .write = vfio_ati_f10_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_probe_ati_f10_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    off_t physoffset = vdev->config_offset + PCI_BASE_ADDRESS_0;
    uint32_t physbar0;
    uint64_t data;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 1 || !vdev->bars[0].size ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    /* Get I/O port BAR physical address */
    if (pread(vdev->fd, &physbar0, 4, physoffset) != 4) {
        error_report("vfio: probe failed for ATI/AMD 0xf10 quirk on device "
                     "%04x:%02x:%02x.%x", vdev->host.domain,
                     vdev->host.bus, vdev->host.slot, vdev->host.function);
        return;
    }

    vfio_bar_write(&vdev->bars[1], 0, 0xf10, 4);
    data = vfio_bar_read(&vdev->bars[1], 0x6, 2);

    /* If the register matches the physical address of BAR0, we need a quirk */
    if (data != (le32_to_cpu(physbar0) >> 16)) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;

    memory_region_init_io(&quirk->mem, &vfio_ati_f10_quirk, quirk,
                          "vfio-ati-f10-quirk", 8);
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem, 0, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled ATI/AMD quirk 0xf10 for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}

#define PCI_VENDOR_ID_NVIDIA 0x10de

/*
 * Nvidia has several different methods to get to config space, the
 * nouveau project has several of these documented here:
 * https://github.com/pathscale/envytools/tree/master/hwdocs
 *
 * The first quirk is actually not documented in envytools and is found
 * on 10de:01d1 (NVIDIA Corporation G72 [GeForce 7300 LE]). This is an
 * NV46 chipset. The backdoor uses the legacy VGA I/O ports to access
 * the mirror of PCI config space found at BAR0 offset 0x1800. The access
 * sequence first writes 0x338 to I/O port 0x3d4. The target offset is
 * then written to 0x3d0. Finally 0x538 is written for a read and 0x738
 * is written for a write to 0x3d4. The BAR0 offset is then accessible
 * through 0x3d0. This quirk doesn't seem to be necessary on newer cards
 * that use the I/O port BAR5 window but it doesn't hurt to leave it.
 */
enum {
    NV_3D0_NONE,
    NV_3D0_SELECT,
    NV_3D0_WINDOW,
    NV_3D0_READ,
    NV_3D0_WRITE,
};

static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque,
                                           hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;
    uint64_t data = vfio_vga_read(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                                  addr + 0x10, size);

    if (quirk->data == NV_3D0_READ && addr == 0) {
        data = vfio_pci_read_config(pdev, quirk->data2, size);
        DPRINTF("%s(0x3d0, %d) = 0x%"PRIx64"\n", __func__, size, data);
    }

    quirk->data = NV_3D0_NONE;

    return data;
}

static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr,
                                        uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;

    switch (quirk->data) {
    case NV_3D0_NONE:
        if (addr == 4 && data == 0x338) {
            quirk->data = NV_3D0_SELECT;
        }
        break;
    case NV_3D0_SELECT:
        quirk->data = NV_3D0_NONE;
        if (addr == 0 && (data & ~0xff) == 0x1800) {
            quirk->data = NV_3D0_WINDOW;
            quirk->data2 = data & 0xff;
        }
        break;
    case NV_3D0_WINDOW:
        quirk->data = NV_3D0_NONE;
        if (addr == 4) {
            if (data == 0x538) {
                quirk->data = NV_3D0_READ;
            } else if (data == 0x738) {
                quirk->data = NV_3D0_WRITE;
            }
        }
        break;
    case NV_3D0_WRITE:
        quirk->data = NV_3D0_NONE;
        if (addr == 0) {
            vfio_pci_write_config(pdev, quirk->data2, data, size);
            DPRINTF("%s(0x3d0, 0x%"PRIx64", %d)\n", __func__, data, size);
            return;
        }
        break;
    default:
        quirk->data = NV_3D0_NONE;
    }

    vfio_vga_write(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                   addr + 0x10, data, size);
}

static const MemoryRegionOps vfio_nvidia_3d0_quirk = {
    .read = vfio_nvidia_3d0_quirk_read,
    .write = vfio_nvidia_3d0_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_vga_probe_nvidia_3d0_quirk(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA ||
        !vdev->bars[1].size) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;

    memory_region_init_io(&quirk->mem, &vfio_nvidia_3d0_quirk, quirk,
                          "vfio-nvidia-3d0-quirk", 6);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                                0x10, &quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    DPRINTF("Enabled NVIDIA VGA 0x3d0 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}

/*
 * The second quirk is documented in envytools. The I/O port BAR5 is just
 * a set of address/data ports to the MMIO BARs. The BAR we care about is
 * again BAR0. This backdoor is apparently a bit newer than the one above,
 * so we need to trap not only the 256 bytes @0x1800, but all of PCI config
 * space, including extended space, which is available at the 4k @0x88000.
 */
enum {
    NV_BAR5_ADDRESS = 0x1,
    NV_BAR5_ENABLE = 0x2,
    NV_BAR5_MASTER = 0x4,
    NV_BAR5_VALID = 0x7,
};

static uint64_t vfio_nvidia_bar5_window_quirk_read(void *opaque,
                                                   hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    uint64_t data = vfio_bar_read(&vdev->bars[5], addr, size);

    if (addr == 0xc && quirk->data == NV_BAR5_VALID) {
        data = vfio_pci_read_config(&vdev->pdev, quirk->data2, size);
        DPRINTF("%s(%04x:%02x:%02x.%x:BAR5+0x%"HWADDR_PRIx", %d) = 0x%"
                PRIx64"\n", __func__, vdev->host.domain, vdev->host.bus,
                vdev->host.slot, vdev->host.function, addr, size, data);
    }

    return data;
}

static void vfio_nvidia_bar5_window_quirk_write(void *opaque, hwaddr addr,
                                                uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;

    /*
     * Use quirk->data to track enables and quirk->data2 for the offset
     */
    switch (addr) {
    case 0x0:
        if (data & 0x1) {
            quirk->data |= NV_BAR5_MASTER;
        } else {
            quirk->data &= ~NV_BAR5_MASTER;
        }
        break;
    case 0x4:
        if (data & 0x1) {
            quirk->data |= NV_BAR5_ENABLE;
        } else {
            quirk->data &= ~NV_BAR5_ENABLE;
        }
        break;
    case 0x8:
        if (quirk->data & NV_BAR5_MASTER) {
            if ((data & ~0xfff) == 0x88000) {
                quirk->data |= NV_BAR5_ADDRESS;
                quirk->data2 = data & 0xfff;
            } else if ((data & ~0xff) == 0x1800) {
                quirk->data |= NV_BAR5_ADDRESS;
                quirk->data2 = data & 0xff;
            } else {
                quirk->data &= ~NV_BAR5_ADDRESS;
            }
        }
        break;
    case 0xc:
        if (quirk->data == NV_BAR5_VALID) {
            vfio_pci_write_config(&vdev->pdev, quirk->data2, data, size);
            DPRINTF("%s(%04x:%02x:%02x.%x:BAR5+0x%"HWADDR_PRIx", 0x%"
                    PRIx64", %d)\n", __func__, vdev->host.domain,
                    vdev->host.bus, vdev->host.slot, vdev->host.function,
                    addr, data, size);
            return;
        }
    }

    vfio_bar_write(&vdev->bars[5], addr, data, size);
}

static const MemoryRegionOps vfio_nvidia_bar5_window_quirk = {
    .read = vfio_nvidia_bar5_window_quirk_read,
    .write = vfio_nvidia_bar5_window_quirk_write,
    .valid.min_access_size = 4,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_probe_nvidia_bar5_window_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 5 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;

    memory_region_init_io(&quirk->mem, &vfio_nvidia_bar5_window_quirk, quirk,
                          "vfio-nvidia-bar5-window-quirk", 16);
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem, 0, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled NVIDIA BAR5 window quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}

/*
 * Finally, BAR0 itself. We want to redirect any accesses to either
 * 0x1800 or 0x88000 through the PCI config space access functions.
 *
 * NB - quirk at a page granularity or else they don't seem to work when
 * BARs are mmap'd
 *
 * Here's offset 0x88000...
 */
static uint64_t vfio_nvidia_bar0_88000_quirk_read(void *opaque,
                                                  hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    hwaddr base = 0x88000 & TARGET_PAGE_MASK;
    hwaddr offset = 0x88000 & ~TARGET_PAGE_MASK;
    uint64_t data = vfio_bar_read(&vdev->bars[0], addr + base, size);

    if (ranges_overlap(addr, size, offset, PCI_CONFIG_SPACE_SIZE)) {
        data = vfio_pci_read_config(&vdev->pdev, addr - offset, size);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR0+0x%"HWADDR_PRIx", %d) = 0x%"
                PRIx64"\n", __func__, vdev->host.domain, vdev->host.bus,
                vdev->host.slot, vdev->host.function, addr + base, size, data);
    }

    return data;
}

static void vfio_nvidia_bar0_88000_quirk_write(void *opaque, hwaddr addr,
                                               uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    hwaddr base = 0x88000 & TARGET_PAGE_MASK;
    hwaddr offset = 0x88000 & ~TARGET_PAGE_MASK;

    if (ranges_overlap(addr, size, offset, PCI_CONFIG_SPACE_SIZE)) {
        vfio_pci_write_config(&vdev->pdev, addr - offset, data, size);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR0+0x%"HWADDR_PRIx", 0x%"
                PRIx64", %d)\n", __func__, vdev->host.domain, vdev->host.bus,
                vdev->host.slot, vdev->host.function, addr + base, data, size);
    } else {
        vfio_bar_write(&vdev->bars[0], addr + base, data, size);
    }
}

static const MemoryRegionOps vfio_nvidia_bar0_88000_quirk = {
    .read = vfio_nvidia_bar0_88000_quirk_read,
    .write = vfio_nvidia_bar0_88000_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_probe_nvidia_bar0_88000_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 0 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;

    memory_region_init_io(&quirk->mem, &vfio_nvidia_bar0_88000_quirk, quirk,
                          "vfio-nvidia-bar0-88000-quirk",
                          TARGET_PAGE_ALIGN(PCIE_CONFIG_SPACE_SIZE));
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        0x88000 & TARGET_PAGE_MASK,
                                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled NVIDIA BAR0 0x88000 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
1661 |
/*
|
1662 |
* And here's the same for BAR0 offset 0x1800...
|
1663 |
*/
|
1664 |
static uint64_t vfio_nvidia_bar0_1800_quirk_read(void *opaque, |
1665 |
hwaddr addr, unsigned size)
|
1666 |
{ |
1667 |
VFIOQuirk *quirk = opaque; |
1668 |
VFIODevice *vdev = quirk->vdev; |
1669 |
hwaddr base = 0x1800 & TARGET_PAGE_MASK;
|
1670 |
hwaddr offset = 0x1800 & ~TARGET_PAGE_MASK;
|
1671 |
uint64_t data = vfio_bar_read(&vdev->bars[0], addr + base, size);
|
1672 |
|
1673 |
if (ranges_overlap(addr, size, offset, PCI_CONFIG_SPACE_SIZE)) {
|
1674 |
data = vfio_pci_read_config(&vdev->pdev, addr - offset, size); |
1675 |
|
1676 |
DPRINTF("%s(%04x:%02x:%02x.%x:BAR0+0x%"HWADDR_PRIx", %d) = 0x%" |
1677 |
PRIx64"\n", __func__, vdev->host.domain, vdev->host.bus,
|
1678 |
vdev->host.slot, vdev->host.function, addr + base, size, data); |
1679 |
} |
1680 |
|
1681 |
return data;
|
1682 |
} |
1683 |
|
1684 |
static void vfio_nvidia_bar0_1800_quirk_write(void *opaque, hwaddr addr, |
1685 |
uint64_t data, unsigned size)
|
1686 |
{ |
1687 |
VFIOQuirk *quirk = opaque; |
1688 |
VFIODevice *vdev = quirk->vdev; |
1689 |
hwaddr base = 0x1800 & TARGET_PAGE_MASK;
|
1690 |
hwaddr offset = 0x1800 & ~TARGET_PAGE_MASK;
|
1691 |
|
1692 |
if (ranges_overlap(addr, size, offset, PCI_CONFIG_SPACE_SIZE)) {
|
1693 |
vfio_pci_write_config(&vdev->pdev, addr - offset, data, size); |
1694 |
|
1695 |
DPRINTF("%s(%04x:%02x:%02x.%x:BAR0+0x%"HWADDR_PRIx", 0x%" |
1696 |
PRIx64", %d)\n", __func__, vdev->host.domain, vdev->host.bus,
|
1697 |
vdev->host.slot, vdev->host.function, addr + base, data, size); |
1698 |
} else {
|
1699 |
vfio_bar_write(&vdev->bars[0], addr + base, data, size);
|
1700 |
} |
1701 |
} |
1702 |
|
1703 |
static const MemoryRegionOps vfio_nvidia_bar0_1800_quirk = { |
1704 |
.read = vfio_nvidia_bar0_1800_quirk_read, |
1705 |
.write = vfio_nvidia_bar0_1800_quirk_write, |
1706 |
.endianness = DEVICE_LITTLE_ENDIAN, |
1707 |
}; |
1708 |
|
1709 |
static void vfio_probe_nvidia_bar0_1800_quirk(VFIODevice *vdev, int nr) |
1710 |
{ |
1711 |
PCIDevice *pdev = &vdev->pdev; |
1712 |
VFIOQuirk *quirk; |
1713 |
|
1714 |
if (!vdev->has_vga || nr != 0 || |
1715 |
pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) { |
1716 |
return;
|
1717 |
} |
1718 |
|
1719 |
/* Log the chipset ID */
|
1720 |
DPRINTF("Nvidia NV%02x\n",
|
1721 |
(unsigned int)(vfio_bar_read(&vdev->bars[0], 0, 4) >> 20) & 0xff); |
1722 |
|
1723 |
quirk = g_malloc0(sizeof(*quirk));
|
1724 |
quirk->vdev = vdev; |
1725 |
|
1726 |
memory_region_init_io(&quirk->mem, &vfio_nvidia_bar0_1800_quirk, quirk, |
1727 |
"vfio-nvidia-bar0-1800-quirk",
|
1728 |
TARGET_PAGE_ALIGN(PCI_CONFIG_SPACE_SIZE)); |
1729 |
memory_region_add_subregion_overlap(&vdev->bars[nr].mem, |
1730 |
0x1800 & TARGET_PAGE_MASK,
|
1731 |
&quirk->mem, 1);
|
1732 |
|
1733 |
QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); |
1734 |
|
1735 |
DPRINTF("Enabled NVIDIA BAR0 0x1800 quirk for device %04x:%02x:%02x.%x\n",
|
1736 |
vdev->host.domain, vdev->host.bus, vdev->host.slot, |
1737 |
vdev->host.function); |
1738 |
} |
1739 |
|
1740 |
/*
|
1741 |
* TODO - Some Nvidia devices provide config access to their companion HDA
|
1742 |
* device and even to their parent bridge via these config space mirrors.
|
1743 |
* Add quirks for those regions.
|
1744 |
*/
|
1745 |
|
1746 |
/*
|
1747 |
* Common quirk probe entry points.
|
1748 |
*/
|
1749 |
static void vfio_vga_quirk_setup(VFIODevice *vdev) |
1750 |
{ |
1751 |
vfio_vga_probe_ati_3c3_quirk(vdev); |
1752 |
vfio_vga_probe_nvidia_3d0_quirk(vdev); |
1753 |
} |
1754 |
|
1755 |
static void vfio_vga_quirk_teardown(VFIODevice *vdev) |
1756 |
{ |
1757 |
int i;
|
1758 |
|
1759 |
for (i = 0; i < ARRAY_SIZE(vdev->vga.region); i++) { |
1760 |
while (!QLIST_EMPTY(&vdev->vga.region[i].quirks)) {
|
1761 |
VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga.region[i].quirks); |
1762 |
memory_region_del_subregion(&vdev->vga.region[i].mem, &quirk->mem); |
1763 |
QLIST_REMOVE(quirk, next); |
1764 |
g_free(quirk); |
1765 |
} |
1766 |
} |
1767 |
} |
1768 |
|
1769 |
static void vfio_bar_quirk_setup(VFIODevice *vdev, int nr) |
1770 |
{ |
1771 |
vfio_probe_ati_4010_quirk(vdev, nr); |
1772 |
vfio_probe_ati_f10_quirk(vdev, nr); |
1773 |
vfio_probe_nvidia_bar5_window_quirk(vdev, nr); |
1774 |
vfio_probe_nvidia_bar0_88000_quirk(vdev, nr); |
1775 |
vfio_probe_nvidia_bar0_1800_quirk(vdev, nr); |
1776 |
} |
1777 |
|
1778 |
static void vfio_bar_quirk_teardown(VFIODevice *vdev, int nr) |
1779 |
{ |
1780 |
VFIOBAR *bar = &vdev->bars[nr]; |
1781 |
|
1782 |
while (!QLIST_EMPTY(&bar->quirks)) {
|
1783 |
VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks); |
1784 |
memory_region_del_subregion(&bar->mem, &quirk->mem); |
1785 |
QLIST_REMOVE(quirk, next); |
1786 |
g_free(quirk); |
1787 |
} |
1788 |
} |
1789 |
|
1790 |
/*
|
1791 |
* PCI config space
|
1792 |
*/
|
1793 |
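/*
 * Config space reads merge two sources: bytes flagged in
 * vdev->emulated_config_bits come from QEMU's emulated config space,
 * everything else is read from the physical device through the vfio
 * device file descriptor.
 */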
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;

    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
    emu_bits = le32_to_cpu(emu_bits);

    if (emu_bits) {
        emu_val = pci_default_read_config(pdev, addr, len);
    }

    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
        ssize_t ret;

        ret = pread(vdev->fd, &phys_val, len, vdev->config_offset + addr);
        if (ret != len) {
            error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x) failed: %m",
                         __func__, vdev->host.domain, vdev->host.bus,
                         vdev->host.slot, vdev->host.function, addr, len);
            return -errno;
        }
        phys_val = le32_to_cpu(phys_val);
    }

    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);

    DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, len=0x%x) %x\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, len, val);

    return val;
}

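/*
 * Config space writes are sent to the device through the vfio fd (the
 * kernel filters out anything we must not change) and are also replayed
 * into QEMU's emulated config space.  Writes touching the MSI or MSI-X
 * capability are watched so we can switch interrupt modes when the guest
 * toggles the enable bits.
 */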
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint32_t val_le = cpu_to_le32(val);

    DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, 0x%x, len=0x%x)\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->fd, &val_le, len, vdev->config_offset + addr) != len) {
        error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->host.domain, vdev->host.bus,
                     vdev->host.slot, vdev->host.function, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_enable_msi(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_disable_msi(vdev);
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
               ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_enable_msix(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_disable_msix(vdev);
        }
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        DPRINTF("VFIO_UNMAP_DMA: %d\n", -errno);
        return -errno;
    }

    return 0;
}

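/*
 * Map a range of guest physical addresses (iova) to the host virtual
 * address backing it, making it available for device DMA through the
 * IOMMU.
 */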
static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    DPRINTF("VFIO_MAP_DMA: %d\n", -errno);
    return -errno;
}

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return !memory_region_is_ram(section->mr);
}

static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            iommu_data.listener);
    hwaddr iova, end;
    void *vaddr;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        DPRINTF("SKIPPING region_add %"HWADDR_PRIx" - %"PRIx64"\n",
                section->offset_within_address_space,
                section->offset_within_address_space + section->size - 1);
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space + section->size) &
          TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    DPRINTF("region_add %"HWADDR_PRIx" - %"HWADDR_PRIx" [%p]\n",
            iova, end - 1, vaddr);

    ret = vfio_dma_map(container, iova, end - iova, vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, end - iova, vaddr, ret);
    }
}

static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            iommu_data.listener);
    hwaddr iova, end;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        DPRINTF("SKIPPING region_del %"HWADDR_PRIx" - %"PRIx64"\n",
                section->offset_within_address_space,
                section->offset_within_address_space + section->size - 1);
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space + section->size) &
          TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    DPRINTF("region_del %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            iova, end - 1);

    ret = vfio_dma_unmap(container, iova, end - iova);
    if (ret) {
        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx") = %d (%m)",
                     container, iova, end - iova, ret);
    }
}

static MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->iommu_data.listener);
}

/*
 * Interrupt setup
 */
static void vfio_disable_interrupts(VFIODevice *vdev)
{
    switch (vdev->interrupt) {
    case VFIO_INT_INTx:
        vfio_disable_intx(vdev);
        break;
    case VFIO_INT_MSI:
        vfio_disable_msi(vdev);
        break;
    case VFIO_INT_MSIX:
        vfio_disable_msix(vdev);
        break;
    }
}

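/*
 * Parse the physical MSI capability to determine 64-bit address and
 * per-vector masking support plus the number of vectors, then register
 * an emulated MSI capability of matching size with QEMU.
 */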
static int vfio_setup_msi(VFIODevice *vdev, int pos)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;

    if (pread(vdev->fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    DPRINTF("%04x:%02x:%02x.%x PCI MSI CAP @0x%x\n", vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msi_init failed");
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}

/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to setup MSI-X we need a
 * MemoryRegion for the BAR.  In order to setup the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives.  So we
 * unfortunately split MSI-X setup across two functions.
 */
static int vfio_early_setup_msix(VFIODevice *vdev)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return 0;
    }

    if (pread(vdev->fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }

    if (pread(vdev->fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        return -errno;
    }

    if (pread(vdev->fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        return -errno;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    vdev->msix = g_malloc0(sizeof(*(vdev->msix)));
    vdev->msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    DPRINTF("%04x:%02x:%02x.%x "
            "PCI MSI-X CAP @0x%x, BAR %d, offset 0x%x, entries %d\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, pos, vdev->msix->table_bar,
            vdev->msix->table_offset, vdev->msix->entries);

    return 0;
}

static int vfio_setup_msix(VFIODevice *vdev, int pos)
{
    int ret;

    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    &vdev->bars[vdev->msix->table_bar].mem,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    &vdev->bars[vdev->msix->pba_bar].mem,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msix_init failed");
        return ret;
    }

    return 0;
}

static void vfio_teardown_msi(VFIODevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev, &vdev->bars[vdev->msix->table_bar].mem,
                    &vdev->bars[vdev->msix->pba_bar].mem);
    }
}

/*
 * Resource setup
 */
static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        VFIOBAR *bar = &vdev->bars[i];

        if (!bar->size) {
            continue;
        }

        memory_region_set_enabled(&bar->mmap_mem, enabled);
        if (vdev->msix && vdev->msix->table_bar == i) {
            memory_region_set_enabled(&vdev->msix->mmap_mem, enabled);
        }
    }
}

static void vfio_unmap_bar(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    if (!bar->size) {
        return;
    }

    vfio_bar_quirk_teardown(vdev, nr);

    memory_region_del_subregion(&bar->mem, &bar->mmap_mem);
    munmap(bar->mmap, memory_region_size(&bar->mmap_mem));

    if (vdev->msix && vdev->msix->table_bar == nr) {
        memory_region_del_subregion(&bar->mem, &vdev->msix->mmap_mem);
        munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem));
    }

    memory_region_destroy(&bar->mem);
}

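/*
 * Attempt to mmap a (sub)range of a BAR directly into QEMU and expose it
 * as a RAM-backed subregion for fast access.  If the range can't be
 * mmap'd, fall back to a zero-sized subregion so teardown stays uniform
 * and accesses go through the slow read/write path.
 */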
static int vfio_mmap_bar(VFIOBAR *bar, MemoryRegion *mem, MemoryRegion *submem,
                         void **map, size_t size, off_t offset,
                         const char *name)
{
    int ret = 0;

    if (VFIO_ALLOW_MMAP && size && bar->flags & VFIO_REGION_INFO_FLAG_MMAP) {
        int prot = 0;

        if (bar->flags & VFIO_REGION_INFO_FLAG_READ) {
            prot |= PROT_READ;
        }

        if (bar->flags & VFIO_REGION_INFO_FLAG_WRITE) {
            prot |= PROT_WRITE;
        }

        *map = mmap(NULL, size, prot, MAP_SHARED,
                    bar->fd, bar->fd_offset + offset);
        if (*map == MAP_FAILED) {
            *map = NULL;
            ret = -errno;
            goto empty_region;
        }

        memory_region_init_ram_ptr(submem, name, size, *map);
    } else {
empty_region:
        /* Create a zero sized sub-region to make cleanup easy. */
        memory_region_init(submem, name, 0);
    }

    memory_region_add_subregion(mem, offset, submem);

    return ret;
}

static void vfio_map_bar(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    unsigned size = bar->size;
    char name[64];
    uint32_t pci_bar;
    uint8_t type;
    int ret;

    /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
    if (!size) {
        return;
    }

    snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function, nr);

    /* Determine what type of BAR this is for registration */
    ret = pread(vdev->fd, &pci_bar, sizeof(pci_bar),
                vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
    if (ret != sizeof(pci_bar)) {
        error_report("vfio: Failed to read BAR %d (%m)", nr);
        return;
    }

    pci_bar = le32_to_cpu(pci_bar);
    type = pci_bar & (pci_bar & PCI_BASE_ADDRESS_SPACE_IO ?
           ~PCI_BASE_ADDRESS_IO_MASK : ~PCI_BASE_ADDRESS_MEM_MASK);

    /* A "slow" read/write mapping underlies all BARs */
    memory_region_init_io(&bar->mem, &vfio_bar_ops, bar, name, size);
    pci_register_bar(&vdev->pdev, nr, type, &bar->mem);

    /*
     * We can't mmap areas overlapping the MSIX vector table, so we
     * potentially insert a direct-mapped subregion before and after it.
     */
    if (vdev->msix && vdev->msix->table_bar == nr) {
        size = vdev->msix->table_offset & TARGET_PAGE_MASK;
    }

    strncat(name, " mmap", sizeof(name) - strlen(name) - 1);
    if (vfio_mmap_bar(bar, &bar->mem,
                      &bar->mmap_mem, &bar->mmap, size, 0, name)) {
        error_report("%s unsupported. Performance may be slow", name);
    }

    if (vdev->msix && vdev->msix->table_bar == nr) {
        unsigned start;

        start = TARGET_PAGE_ALIGN(vdev->msix->table_offset +
                                  (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

        size = start < bar->size ? bar->size - start : 0;
        strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1);
        /* VFIOMSIXInfo contains another MemoryRegion for this mapping */
        if (vfio_mmap_bar(bar, &bar->mem, &vdev->msix->mmap_mem,
                          &vdev->msix->mmap, size, start, name)) {
            error_report("%s unsupported. Performance may be slow", name);
        }
    }

    vfio_bar_quirk_setup(vdev, nr);
}

static void vfio_map_bars(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_map_bar(vdev, i);
    }

    if (vdev->has_vga) {
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
                              &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_MEM],
                              "vfio-vga-mmio@0xa0000",
                              QEMU_PCI_VGA_MEM_SIZE);
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
                              &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_IO_LO],
                              "vfio-vga-io@0x3b0",
                              QEMU_PCI_VGA_IO_LO_SIZE);
        memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                              &vfio_vga_ops,
                              &vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                              "vfio-vga-io@0x3c0",
                              QEMU_PCI_VGA_IO_HI_SIZE);

        pci_register_vga(&vdev->pdev, &vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
                         &vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
                         &vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem);
        vfio_vga_quirk_setup(vdev);
    }
}

static void vfio_unmap_bars(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_unmap_bar(vdev, i);
    }

    if (vdev->has_vga) {
        vfio_vga_quirk_teardown(vdev);
        pci_unregister_vga(&vdev->pdev);
        memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem);
        memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem);
        memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem);
    }
}

/*
 * General setup
 */
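/*
 * Return an upper bound for the size of the standard capability at @pos
 * by finding the next capability offset in the chain (or the end of
 * standard config space) and taking the distance to it.
 */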
static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
{
    uint8_t tmp, next = 0xff;

    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
         tmp = pdev->config[tmp + 1]) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
{
    pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
}

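/*
 * vfio_add_emulated_word() and vfio_add_emulated_long() mark a config
 * space field as emulated: the value is stored in QEMU's copy of config
 * space, guest writes to those bits are blocked via wmask, and the bits
 * are recorded in emulated_config_bits so accesses are serviced by QEMU
 * rather than the physical device.
 */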
static void vfio_add_emulated_word(VFIODevice *vdev, int pos,
                                   uint16_t val, uint16_t mask)
{
    vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
{
    pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
}

static void vfio_add_emulated_long(VFIODevice *vdev, int pos,
                                   uint32_t val, uint32_t mask)
{
    vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static int vfio_setup_pcie_cap(VFIODevice *vdev, int pos, uint8_t size)
{
    uint16_t flags;
    uint8_t type;

    flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
    type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;

    if (type != PCI_EXP_TYPE_ENDPOINT &&
        type != PCI_EXP_TYPE_LEG_END &&
        type != PCI_EXP_TYPE_RC_END) {

        error_report("vfio: Assignment of PCIe type 0x%x "
                     "devices is not currently supported", type);
        return -EINVAL;
    }

    if (!pci_bus_is_express(vdev->pdev.bus)) {
        /*
         * Use express capability as-is on PCI bus.  It doesn't make much
         * sense to even expose, but some drivers (ex. tg3) depend on it
         * and guests don't seem to be particular about it.  We'll need
         * to revisit this or force express devices to express buses if we
         * ever expose an IOMMU to the guest.
         */
    } else if (pci_bus_is_root(vdev->pdev.bus)) {
        /*
         * On a Root Complex bus Endpoints become Root Complex Integrated
         * Endpoints, which changes the type and clears the LNK & LNK2 fields.
         */
        if (type == PCI_EXP_TYPE_ENDPOINT) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_RC_END << 4,
                                   PCI_EXP_FLAGS_TYPE);

            /* Link Capabilities, Status, and Control go away */
            if (size > PCI_EXP_LNKCTL) {
                vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);

#ifndef PCI_EXP_LNKCAP2
#define PCI_EXP_LNKCAP2 44
#endif
#ifndef PCI_EXP_LNKSTA2
#define PCI_EXP_LNKSTA2 50
#endif
                /* Link 2 Capabilities, Status, and Control go away */
                if (size > PCI_EXP_LNKCAP2) {
                    vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
                }
            }

        } else if (type == PCI_EXP_TYPE_LEG_END) {
            /*
             * Legacy endpoints don't belong on the root complex.  Windows
             * seems to be happier with devices if we skip the capability.
             */
            return 0;
        }

    } else {
        /*
         * Convert Root Complex Integrated Endpoints to regular endpoints.
         * These devices don't support LNK/LNK2 capabilities, so make them up.
         */
        if (type == PCI_EXP_TYPE_RC_END) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_ENDPOINT << 4,
                                   PCI_EXP_FLAGS_TYPE);
            vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
                                   PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
            vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
        }

        /* Mark the Link Status bits as emulated to allow virtual negotiation */
        vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
                               pci_get_word(vdev->pdev.config + pos +
                                            PCI_EXP_LNKSTA),
                               PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
    }

    pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size);
    if (pos >= 0) {
        vdev->pdev.exp.exp_cap = pos;
    }

    return pos;
}

static int vfio_add_std_cap(VFIODevice *vdev, uint8_t pos)
{
    PCIDevice *pdev = &vdev->pdev;
    uint8_t cap_id, next, size;
    int ret;

    cap_id = pdev->config[pos];
    next = pdev->config[pos + 1];

    /*
     * If it becomes important to configure capabilities to their actual
     * size, use this as the default when it's something we don't recognize.
     * Since QEMU doesn't actually handle many of the config accesses,
     * exact size doesn't seem worthwhile.
     */
    size = vfio_std_cap_max_size(pdev, pos);

    /*
     * pci_add_capability always inserts the new capability at the head
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we insert from the end by making this recursive.
     * This is also why we pre-calculate size above as cached config space
     * will be changed as we unwind the stack.
     */
    if (next) {
        ret = vfio_add_std_cap(vdev, next);
        if (ret) {
            return ret;
        }
    } else {
        /* Begin the rebuild, use QEMU emulated list bits */
        pdev->config[PCI_CAPABILITY_LIST] = 0;
        vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
        vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
    }

    /* Use emulated next pointer to allow dropping caps */
    pci_set_byte(vdev->emulated_config_bits + pos + 1, 0xff);

    switch (cap_id) {
    case PCI_CAP_ID_MSI:
        ret = vfio_setup_msi(vdev, pos);
        break;
    case PCI_CAP_ID_EXP:
        ret = vfio_setup_pcie_cap(vdev, pos, size);
        break;
    case PCI_CAP_ID_MSIX:
        ret = vfio_setup_msix(vdev, pos);
        break;
    case PCI_CAP_ID_PM:
        vdev->pm_cap = pos;
    default:
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    }

    if (ret < 0) {
        error_report("vfio: %04x:%02x:%02x.%x Error adding PCI capability "
                     "0x%x[0x%x]@0x%x: %d", vdev->host.domain,
                     vdev->host.bus, vdev->host.slot, vdev->host.function,
                     cap_id, size, pos, ret);
        return ret;
    }

    return 0;
}

static int vfio_add_capabilities(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
        !pdev->config[PCI_CAPABILITY_LIST]) {
        return 0; /* Nothing to add */
    }

    return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
}

static int vfio_load_rom(VFIODevice *vdev)
{
    uint64_t size = vdev->rom_size;
    char name[32];
    off_t off = 0, voff = vdev->rom_offset;
    ssize_t bytes;
    void *ptr;

    /* If loading ROM from file, pci handles it */
    if (vdev->pdev.romfile || !vdev->pdev.rom_bar || !size) {
        return 0;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    snprintf(name, sizeof(name), "vfio[%04x:%02x:%02x.%x].rom",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);
    memory_region_init_ram(&vdev->pdev.rom, name, size);
    ptr = memory_region_get_ram_ptr(&vdev->pdev.rom);
    memset(ptr, 0xff, size);

    while (size) {
        bytes = pread(vdev->fd, ptr + off, size, voff + off);
        if (bytes == 0) {
            break; /* expect that we could get back less than the ROM BAR */
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            memory_region_destroy(&vdev->pdev.rom);
            return -errno;
        }
    }

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT, 0, &vdev->pdev.rom);
    vdev->pdev.has_rom = true;
    return 0;
}

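/*
 * Attach a group to a vfio container, reusing an existing container when
 * the kernel accepts the group into it.  Otherwise open a new
 * /dev/vfio/vfio container, verify the API version, enable the type1
 * IOMMU model and register the memory listener that maps guest RAM for
 * device DMA.
 */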
static int vfio_connect_container(VFIOGroup *group)
{
    VFIOContainer *container;
    int ret, fd;

    if (group->container) {
        return 0;
    }

    QLIST_FOREACH(container, &container_list, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_report("vfio: failed to open /dev/vfio/vfio: %m");
        return -errno;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_report("vfio: supported vfio version: %d, "
                     "reported version: %d", VFIO_API_VERSION, ret);
        close(fd);
        return -EINVAL;
    }

    container = g_malloc0(sizeof(*container));
    container->fd = fd;

    if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_report("vfio: failed to set group container: %m");
            g_free(container);
            close(fd);
            return -errno;
        }

        ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
        if (ret) {
            error_report("vfio: failed to set iommu for container: %m");
            g_free(container);
            close(fd);
            return -errno;
        }

        container->iommu_data.listener = vfio_memory_listener;
        container->iommu_data.release = vfio_listener_release;

        memory_listener_register(&container->iommu_data.listener,
                                 &address_space_memory);
    } else {
        error_report("vfio: No available IOMMU models");
        g_free(container);
        close(fd);
        return -EINVAL;
    }

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&container_list, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    return 0;
}

static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    if (QLIST_EMPTY(&container->group_list)) {
        if (container->iommu_data.release) {
            container->iommu_data.release(container);
        }
        QLIST_REMOVE(container, next);
        DPRINTF("vfio_disconnect_container: close container->fd\n");
        close(container->fd);
        g_free(container);
    }
}

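/*
 * Look up (or open and initialize) the VFIOGroup for an iommu group
 * number, checking that the group is viable and connected to a container
 * before handing it out.
 */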
static VFIOGroup *vfio_get_group(int groupid)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &group_list, next) {
        if (group->groupid == groupid) {
            return group;
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_report("vfio: error opening %s: %m", path);
        g_free(group);
        return NULL;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_report("vfio: error getting group status: %m");
        close(group->fd);
        g_free(group);
        return NULL;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_report("vfio: error, group %d is not viable, please ensure "
                     "all devices within the iommu_group are bound to their "
                     "vfio bus driver.", groupid);
        close(group->fd);
        g_free(group);
        return NULL;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group)) {
        error_report("vfio: failed to setup container for group %d", groupid);
        close(group->fd);
        g_free(group);
        return NULL;
    }

    QLIST_INSERT_HEAD(&group_list, group, next);

    return group;
}

static void vfio_put_group(VFIOGroup *group)
{
    if (!QLIST_EMPTY(&group->device_list)) {
        return;
    }

    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    DPRINTF("vfio_put_group: close group->fd\n");
    close(group->fd);
    g_free(group);
}

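/*
 * Request the device fd from the group and collect the basic device,
 * region (BARs, ROM, config) and, when requested, VGA region layout that
 * the rest of the driver relies on.
 */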
static int vfio_get_device(VFIOGroup *group, const char *name, VFIODevice *vdev)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
    int ret, i;

    ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (ret < 0) {
        error_report("vfio: error getting device %s from group %d: %m",
                     name, group->groupid);
        error_printf("Verify all devices in group %d are bound to vfio-pci "
                     "or pci-stub and not already in use\n", group->groupid);
        return ret;
    }

    vdev->fd = ret;
    vdev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vdev, next);

    /* Sanity check device */
    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_report("vfio: error getting device info: %m");
        goto error;
    }

    DPRINTF("Device %s flags: %u, regions: %u, irqs: %u\n", name,
            dev_info.flags, dev_info.num_regions, dev_info.num_irqs);

    if (!(dev_info.flags & VFIO_DEVICE_FLAGS_PCI)) {
        error_report("vfio: Um, this isn't a PCI device");
        goto error;
    }

    vdev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    if (!vdev->reset_works) {
        error_report("Warning, device %s does not support reset", name);
    }

    if (dev_info.num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
        error_report("vfio: unexpected number of io regions %u",
                     dev_info.num_regions);
        goto error;
    }

    if (dev_info.num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
        error_report("vfio: unexpected number of irqs %u", dev_info.num_irqs);
        goto error;
    }

    for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
        reg_info.index = i;

        ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
        if (ret) {
            error_report("vfio: Error getting region %d info: %m", i);
            goto error;
        }

        DPRINTF("Device %s region %d:\n", name, i);
        DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
                (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
                (unsigned long)reg_info.flags);

        vdev->bars[i].flags = reg_info.flags;
        vdev->bars[i].size = reg_info.size;
        vdev->bars[i].fd_offset = reg_info.offset;
        vdev->bars[i].fd = vdev->fd;
        vdev->bars[i].nr = i;
        QLIST_INIT(&vdev->bars[i].quirks);
    }

    reg_info.index = VFIO_PCI_ROM_REGION_INDEX;

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
    if (ret) {
        error_report("vfio: Error getting ROM info: %m");
        goto error;
    }

    DPRINTF("Device %s ROM:\n", name);
    DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
            (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
            (unsigned long)reg_info.flags);

    vdev->rom_size = reg_info.size;
    vdev->rom_offset = reg_info.offset;

    reg_info.index = VFIO_PCI_CONFIG_REGION_INDEX;

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
    if (ret) {
        error_report("vfio: Error getting config info: %m");
        goto error;
    }

    DPRINTF("Device %s config:\n", name);
    DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
            (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
            (unsigned long)reg_info.flags);

    vdev->config_size = reg_info.size;
    if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
        vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }
    vdev->config_offset = reg_info.offset;

    if ((vdev->features & VFIO_FEATURE_ENABLE_VGA) &&
        dev_info.num_regions > VFIO_PCI_VGA_REGION_INDEX) {
        struct vfio_region_info vga_info = {
            .argsz = sizeof(vga_info),
            .index = VFIO_PCI_VGA_REGION_INDEX,
        };

        ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &vga_info);
        if (ret) {
            error_report(
                "vfio: Device does not support requested feature x-vga");
            goto error;
        }

        if (!(vga_info.flags & VFIO_REGION_INFO_FLAG_READ) ||
            !(vga_info.flags & VFIO_REGION_INFO_FLAG_WRITE) ||
            vga_info.size < 0xbffff + 1) {
            error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx",
                         (unsigned long)vga_info.flags,
                         (unsigned long)vga_info.size);
            goto error;
        }

        vdev->vga.fd_offset = vga_info.offset;
        vdev->vga.fd = vdev->fd;

        vdev->vga.region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
        vdev->vga.region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_MEM].quirks);

        vdev->vga.region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
        vdev->vga.region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].quirks);

        vdev->vga.region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
        vdev->vga.region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
        QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks);

        vdev->has_vga = true;
    }

error:
    if (ret) {
        QLIST_REMOVE(vdev, next);
        vdev->group = NULL;
        close(vdev->fd);
    }
    return ret;
}

static void vfio_put_device(VFIODevice *vdev)
{
    QLIST_REMOVE(vdev, next);
    vdev->group = NULL;
    DPRINTF("vfio_put_device: close vdev->fd\n");
    close(vdev->fd);
    if (vdev->msix) {
        g_free(vdev->msix);
        vdev->msix = NULL;
    }
}

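/*
 * Device init path: resolve the host device's iommu group via sysfs,
 * attach to the group and container, read the device's config space and
 * regions, then set up the ROM, BARs, capabilities and interrupts.
 */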
static int vfio_initfn(PCIDevice *pdev)
{
    VFIODevice *pvdev, *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOGroup *group;
    char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

    /* Check that the host device exists */
    snprintf(path, sizeof(path),
             "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);
    if (stat(path, &st) < 0) {
        error_report("vfio: error: no such host device: %s", path);
        return -errno;
    }

    strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1);

    len = readlink(path, iommu_group_path, PATH_MAX);
    if (len <= 0) {
        error_report("vfio: error no iommu_group for device");
        return -errno;
    }

    iommu_group_path[len] = 0;
    group_name = basename(iommu_group_path);

    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_report("vfio: error reading %s: %m", path);
        return -errno;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) group %d\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, groupid);

    group = vfio_get_group(groupid);
    if (!group) {
        error_report("vfio: failed to get group %d", groupid);
        return -ENOENT;
    }

    snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);

    QLIST_FOREACH(pvdev, &group->device_list, next) {
        if (pvdev->host.domain == vdev->host.domain &&
            pvdev->host.bus == vdev->host.bus &&
            pvdev->host.slot == vdev->host.slot &&
            pvdev->host.function == vdev->host.function) {

            error_report("vfio: error: device %s is already attached", path);
            vfio_put_group(group);
            return -EBUSY;
        }
    }

    ret = vfio_get_device(group, path, vdev);
    if (ret) {
        error_report("vfio: failed to get device %s", path);
        vfio_put_group(group);
        return ret;
    }

    /* Get a copy of config space */
    ret = pread(vdev->fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_report("vfio: Failed to read device config space");
        goto out_put;
    }

    /* vfio emulates a lot for us, but some bits need extra love */
    vdev->emulated_config_bits = g_malloc0(vdev->config_size);

    /* QEMU can choose to expose the ROM or not */
    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);

    /* QEMU can change multi-function devices to single function, or reverse */
    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
                                              PCI_HEADER_TYPE_MULTI_FUNCTION;

    /*
     * Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here.
     */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);

    vfio_load_rom(vdev);

    ret = vfio_early_setup_msix(vdev);
    if (ret) {
        goto out_put;
    }

    vfio_map_bars(vdev);

    ret = vfio_add_capabilities(vdev);
    if (ret) {
        goto out_teardown;
    }

    /* QEMU emulates all of MSI & MSIX */
    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
               MSIX_CAP_LENGTH);
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
               vdev->msi_cap_size);
    }

    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = qemu_new_timer_ms(vm_clock,
                                                  vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq);
        ret = vfio_enable_intx(vdev);
        if (ret) {
            goto out_teardown;
        }
    }

    add_boot_device_path(vdev->bootindex, &pdev->qdev, NULL);

    return 0;

out_teardown:
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_teardown_msi(vdev);
    vfio_unmap_bars(vdev);
out_put:
    g_free(vdev->emulated_config_bits);
    vfio_put_device(vdev);
    vfio_put_group(group);
    return ret;
}

static void vfio_exitfn(PCIDevice *pdev)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOGroup *group = vdev->group;

    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_disable_interrupts(vdev);
    if (vdev->intx.mmap_timer) {
        qemu_free_timer(vdev->intx.mmap_timer);
    }
    vfio_teardown_msi(vdev);
    vfio_unmap_bars(vdev);
    g_free(vdev->emulated_config_bits);
    vfio_put_device(vdev);
    vfio_put_group(group);
}

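/*
 * Reset: wake the device to D0 if a power management capability was
 * found, clear the I/O, memory, bus-master and INTx-disable command bits
 * to stop DMA and put INTx in a known state, then invoke the vfio device
 * reset ioctl if the kernel reported reset support.
 */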
static void vfio_pci_reset(DeviceState *dev)
{
    PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint16_t cmd;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    vfio_disable_interrupts(vdev);

    /* Make sure the device is in D0 */
    if (vdev->pm_cap) {
        uint16_t pmcsr;
        uint8_t state;

        pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
        state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (state) {
            pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
            vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
            /* vfio handles the necessary delay here */
            pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
            state = pmcsr & PCI_PM_CTRL_STATE_MASK;
            if (state) {
                error_report("vfio: Unable to power on device, stuck in D%d",
                             state);
            }
        }
    }

    /*
     * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
     * Also put INTx Disable in known state.
     */
    cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
    cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
             PCI_COMMAND_INTX_DISABLE);
    vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);

    if (vdev->reset_works) {
        if (ioctl(vdev->fd, VFIO_DEVICE_RESET)) {
            error_report("vfio: Error unable to reset physical device "
                         "(%04x:%02x:%02x.%x): %m", vdev->host.domain,
                         vdev->host.bus, vdev->host.slot, vdev->host.function);
        }
    }

    vfio_enable_intx(vdev);
}

static Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIODevice, host),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIODevice,
                       intx.mmap_timeout, 1100),
    DEFINE_PROP_BIT("x-vga", VFIODevice, features,
                    VFIO_FEATURE_ENABLE_VGA_BIT, false),
    DEFINE_PROP_INT32("bootindex", VFIODevice, bootindex, -1),
    /*
     * TODO - support passed fds... is this necessary?
     * DEFINE_PROP_STRING("vfiofd", VFIODevice, vfiofd_name),
     * DEFINE_PROP_STRING("vfiogroupfd, VFIODevice, vfiogroupfd_name),
     */
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vfio_pci_vmstate = {
    .name = "vfio-pci",
    .unmigratable = 1,
};

static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    dc->reset = vfio_pci_reset;
    dc->props = vfio_pci_dev_properties;
    dc->vmsd = &vfio_pci_vmstate;
    dc->desc = "VFIO-based PCI device assignment";
    pdc->init = vfio_initfn;
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
    pdc->is_express = 1; /* We might be */
}

static const TypeInfo vfio_pci_dev_info = {
    .name = "vfio-pci",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VFIODevice),
    .class_init = vfio_pci_dev_class_init,
};

static void register_vfio_pci_dev_type(void)
{
    type_register_static(&vfio_pci_dev_info);
}

type_init(register_vfio_pci_dev_type)