hw/vfio_pci.c @ 9c17d615

/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include <dirent.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <linux/vfio.h>

#include "config.h"
#include "qemu/event_notifier.h"
#include "exec/address-spaces.h"
#include "sysemu/kvm.h"
#include "exec/memory.h"
#include "pci/msi.h"
#include "pci/msix.h"
#include "pci/pci.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/queue.h"
#include "qemu/range.h"

/* #define DEBUG_VFIO */
#ifdef DEBUG_VFIO
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "vfio: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct VFIOBAR {
    off_t fd_offset; /* offset of BAR within device fd */
    int fd; /* device fd, allows us to pass VFIOBAR as opaque data */
    MemoryRegion mem; /* slow, read/write access */
    MemoryRegion mmap_mem; /* direct mapped access */
    void *mmap;
    size_t size;
    uint32_t flags; /* VFIO region flags (rd/wr/mmap) */
    uint8_t nr; /* cache the BAR number for debug */
} VFIOBAR;

typedef struct VFIOINTx {
    bool pending; /* interrupt pending */
    bool kvm_accel; /* set when QEMU bypass through KVM enabled */
    uint8_t pin; /* which pin to pull for qemu_set_irq */
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    EventNotifier unmask; /* eventfd for unmask on QEMU bypass */
    PCIINTxRoute route; /* routing info for QEMU bypass */
    uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */
    QEMUTimer *mmap_timer; /* enable mmaps after periods w/o interrupts */
} VFIOINTx;

struct VFIODevice;

typedef struct VFIOMSIVector {
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    struct VFIODevice *vdev; /* back pointer to device */
    int virq; /* KVM irqchip route for QEMU bypass */
    bool use;
} VFIOMSIVector;

enum {
    VFIO_INT_NONE = 0,
    VFIO_INT_INTx = 1,
    VFIO_INT_MSI  = 2,
    VFIO_INT_MSIX = 3,
};

struct VFIOGroup;

typedef struct VFIOContainer {
    int fd; /* /dev/vfio/vfio, empowered by the attached groups */
    struct {
        /* enable abstraction to support various iommu backends */
        union {
            MemoryListener listener; /* Used by type1 iommu */
        };
        void (*release)(struct VFIOContainer *);
    } iommu_data;
    QLIST_HEAD(, VFIOGroup) group_list;
    QLIST_ENTRY(VFIOContainer) next;
} VFIOContainer;

/* Cache of MSI-X setup plus extra mmap and memory region for split BAR map */
typedef struct VFIOMSIXInfo {
    uint8_t table_bar;
    uint8_t pba_bar;
    uint16_t entries;
    uint32_t table_offset;
    uint32_t pba_offset;
    MemoryRegion mmap_mem;
    void *mmap;
} VFIOMSIXInfo;

typedef struct VFIODevice {
    PCIDevice pdev;
    int fd;
    VFIOINTx intx;
    unsigned int config_size;
    off_t config_offset; /* Offset of config space region within device fd */
    unsigned int rom_size;
    off_t rom_offset; /* Offset of ROM region within device fd */
    int msi_cap_size;
    VFIOMSIVector *msi_vectors;
    VFIOMSIXInfo *msix;
    int nr_vectors; /* Number of MSI/MSIX vectors currently in use */
    int interrupt; /* Current interrupt type */
    VFIOBAR bars[PCI_NUM_REGIONS - 1]; /* No ROM */
    PCIHostDeviceAddress host;
    QLIST_ENTRY(VFIODevice) next;
    struct VFIOGroup *group;
    bool reset_works;
} VFIODevice;

typedef struct VFIOGroup {
    int fd;
    int groupid;
    VFIOContainer *container;
    QLIST_HEAD(, VFIODevice) device_list;
    QLIST_ENTRY(VFIOGroup) next;
    QLIST_ENTRY(VFIOGroup) container_next;
} VFIOGroup;

#define MSIX_CAP_LENGTH 12

static QLIST_HEAD(, VFIOContainer)
    container_list = QLIST_HEAD_INITIALIZER(container_list);

static QLIST_HEAD(, VFIOGroup)
    group_list = QLIST_HEAD_INITIALIZER(group_list);

static void vfio_disable_interrupts(VFIODevice *vdev);
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled);

/*
 * Common VFIO interrupt disable
 */
static void vfio_disable_irqindex(VFIODevice *vdev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

/*
 * INTx
 */
static void vfio_unmask_intx(VFIODevice *vdev)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

#ifdef CONFIG_KVM /* Unused outside of CONFIG_KVM code */
static void vfio_mask_intx(VFIODevice *vdev)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
#endif

/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (vdev->intx.pending) {
        qemu_mod_timer(vdev->intx.mmap_timer,
                       qemu_get_clock_ms(vm_clock) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}

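/*
 * eventfd handler for host INTx: mark the interrupt pending, assert the
 * guest IRQ, and drop back to trapped (non-mmap) BAR access until the
 * guest services it.
 */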
static void vfio_intx_interrupt(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) Pin %c\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function,
            'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 1);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        qemu_mod_timer(vdev->intx.mmap_timer,
                       qemu_get_clock_ms(vm_clock) + vdev->intx.mmap_timeout);
    }
}

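/*
 * The guest is assumed to have serviced the interrupt: de-assert the guest
 * IRQ and unmask the host INTx so it can fire again.
 */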
static void vfio_eoi(VFIODevice *vdev)
{
    if (!vdev->intx.pending) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) EOI\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
    vfio_unmask_intx(vdev);
}

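/*
 * Bypass QEMU for INTx delivery by wiring the VFIO interrupt eventfd to a
 * KVM irqfd, with a resample eventfd used for unmasking (requires
 * KVM_CAP_IRQFD_RESAMPLE).  On any failure, fall back to handling the
 * eventfd in QEMU.
 */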
static void vfio_enable_intx_kvm(VFIODevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    struct vfio_irq_set *irq_set;
    int ret, argsz;
    int32_t *pfd;

    if (!kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_intx(vdev);
    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_report("vfio: Error: event_notifier_init failed eoi\n");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to setup resample irqfd: %m\n");
        goto fail_irqfd;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = irqfd.resamplefd;

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx unmask fd: %m\n");
        goto fail_vfio;
    }

    /* Let'em rip */
    vfio_unmask_intx(vdev);

    vdev->intx.kvm_accel = true;

    DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel enabled\n",
            __func__, vdev->host.domain, vdev->host.bus,
            vdev->host.slot, vdev->host.function);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_intx(vdev);
#endif
}

static void vfio_disable_intx_kvm(VFIODevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_intx(vdev);
    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m\n");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_intx(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel disabled\n",
            __func__, vdev->host.domain, vdev->host.bus,
            vdev->host.slot, vdev->host.function);
#endif
}

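/*
 * Called when the guest chipset re-routes the INTx pin; tear down the KVM
 * bypass and re-establish it for the new route.
 */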
static void vfio_update_irq(PCIDevice *pdev)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) IRQ moved %d -> %d\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, vdev->intx.route.irq, route.irq);

    vfio_disable_intx_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_enable_intx_kvm(vdev);

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_eoi(vdev);
}

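/*
 * Enable INTx: trigger the interrupt pin through an eventfd handled by
 * QEMU and, when possible, hand delivery off to the KVM bypass above.
 */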
static int vfio_enable_intx(VFIODevice *vdev)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    int ret, argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() &&
        kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_report("vfio: Error: event_notifier_init failed\n");
        return ret;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx fd: %m\n");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }

    vfio_enable_intx_kvm(vdev);

    vdev->interrupt = VFIO_INT_INTx;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    return 0;
}

static void vfio_disable_intx(VFIODevice *vdev)
{
    int fd;

    qemu_del_timer(vdev->intx.mmap_timer);
    vfio_disable_intx_kvm(vdev);
    vfio_disable_irqindex(vdev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}

/*
 * MSI/X
 */
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIODevice *vdev = vector->vdev;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    if (vdev->interrupt == VFIO_INT_MSIX) {
        msix_notify(&vdev->pdev, nr);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        msi_notify(&vdev->pdev, nr);
    } else {
        error_report("vfio: MSI interrupt received, but not enabled?\n");
    }
}

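/*
 * Program all currently allocated MSI/MSI-X vectors in a single
 * VFIO_DEVICE_SET_IRQS call; unused vectors pass an fd of -1.
 */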
static int vfio_enable_vectors(VFIODevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        if (!vdev->msi_vectors[i].use) {
            fds[i] = -1;
            continue;
        }

        fds[i] = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
    }

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}

static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOMSIVector *vector;
    int ret;

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d used\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    vector = &vdev->msi_vectors[nr];
    vector->vdev = vdev;
    vector->use = true;

    msix_vector_use(pdev, nr);

    if (event_notifier_init(&vector->interrupt, 0)) {
        error_report("vfio: Error: event_notifier_init failed\n");
    }

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    vector->virq = kvm_irqchip_add_msi_route(kvm_state, msg);
    if (vector->virq < 0 ||
        kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
                                       vector->virq) < 0) {
        if (vector->virq >= 0) {
            kvm_irqchip_release_virq(kvm_state, vector->virq);
            vector->virq = -1;
        }
        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            vfio_msi_interrupt, NULL, vector);
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shutdown and incrementally
     * increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d\n", ret);
        }
    } else {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        *pfd = event_notifier_get_fd(&vector->interrupt);

        ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
        g_free(irq_set);
        if (ret) {
            error_report("vfio: failed to modify vector, %d\n", ret);
        }
    }

    return 0;
}

static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];
    int argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d released\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    /*
     * XXX What's the right thing to do here?  This turns off the interrupt
     * completely, but do we really just want to switch the interrupt to
     * bouncing through userspace and let msix.c drop it?  Not sure.
     */
    msix_vector_unuse(pdev, nr);

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
    irq_set->start = nr;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = -1;

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    if (vector->virq < 0) {
        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            NULL, NULL, NULL);
    } else {
        kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt,
                                          vector->virq);
        kvm_irqchip_release_virq(kvm_state, vector->virq);
        vector->virq = -1;
    }

    event_notifier_cleanup(&vector->interrupt);
    vector->use = false;
}

static void vfio_enable_msix(VFIODevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_malloc0(vdev->msix->entries * sizeof(VFIOMSIVector));

    vdev->interrupt = VFIO_INT_MSIX;

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release)) {
        error_report("vfio: msix_set_vector_notifiers failed\n");
    }

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}

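/*
 * Enable MSI: allocate an eventfd per vector, route each through the KVM
 * irqchip when possible, and retry with fewer vectors if the host grants
 * less than requested.
 */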
static void vfio_enable_msi(VFIODevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector));

    for (i = 0; i < vdev->nr_vectors; i++) {
        MSIMessage msg;
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        vector->vdev = vdev;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed\n");
        }

        msg = msi_get_message(&vdev->pdev, i);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vector->virq = kvm_irqchip_add_msi_route(kvm_state, msg);
        if (vector->virq < 0 ||
            kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
                                           vector->virq) < 0) {
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                vfio_msi_interrupt, NULL, vector);
        }
    }

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m\n");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d\n", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt,
                                                  vector->virq);
                kvm_irqchip_release_virq(kvm_state, vector->virq);
                vector->virq = -1;
            } else {
                qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                    NULL, NULL, NULL);
            }
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        return;
    }

    vdev->interrupt = VFIO_INT_MSI;

    DPRINTF("%s(%04x:%02x:%02x.%x) Enabled %d MSI vectors\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, vdev->nr_vectors);
}

static void vfio_disable_msi_common(VFIODevice *vdev)
{
    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_enable_intx(vdev);
}

static void vfio_disable_msix(VFIODevice *vdev)
{
    msix_unset_vector_notifiers(&vdev->pdev);

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_disable_msi_common(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}

static void vfio_disable_msi(VFIODevice *vdev)
{
    int i;

    vfio_disable_irqindex(vdev, VFIO_PCI_MSI_IRQ_INDEX);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        if (!vector->use) {
            continue;
        }

        if (vector->virq >= 0) {
            kvm_irqchip_remove_irqfd_notifier(kvm_state,
                                              &vector->interrupt, vector->virq);
            kvm_irqchip_release_virq(kvm_state, vector->virq);
            vector->virq = -1;
        } else {
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
        }

        event_notifier_cleanup(&vector->interrupt);
    }

    vfio_disable_msi_common(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}

/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
static void vfio_bar_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOBAR *bar = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes\n", size);
        break;
    }

    if (pwrite(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m\n",
                     __func__, addr, data, size);
    }

    DPRINTF("%s(BAR%d+0x%"HWADDR_PRIx", 0x%"PRIx64", %d)\n",
            __func__, bar->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));
}

static uint64_t vfio_bar_read(void *opaque,
                              hwaddr addr, unsigned size)
{
    VFIOBAR *bar = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m\n",
                     __func__, addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    DPRINTF("%s(BAR%d+0x%"HWADDR_PRIx", %d) = 0x%"PRIx64"\n",
            __func__, bar->nr, addr, size, data);

    /* Same as write above */
    vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));

    return data;
}

static const MemoryRegionOps vfio_bar_ops = {
    .read = vfio_bar_read,
    .write = vfio_bar_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * PCI config space
 */
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint32_t val = 0;

    /*
     * We only need QEMU PCI config support for the ROM BAR, the MSI and MSIX
     * capabilities, and the multifunction bit below.  We let VFIO handle
     * virtualizing everything else.  Performance is not a concern here.
     */
    if (ranges_overlap(addr, len, PCI_ROM_ADDRESS, 4) ||
        (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
         ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) ||
        (pdev->cap_present & QEMU_PCI_CAP_MSI &&
         ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size))) {

        val = pci_default_read_config(pdev, addr, len);
    } else {
        if (pread(vdev->fd, &val, len, vdev->config_offset + addr) != len) {
            error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x) failed: %m\n",
                         __func__, vdev->host.domain, vdev->host.bus,
                         vdev->host.slot, vdev->host.function, addr, len);
            return -errno;
        }
        val = le32_to_cpu(val);
    }

    /* Multifunction bit is virtualized in QEMU */
    if (unlikely(ranges_overlap(addr, len, PCI_HEADER_TYPE, 1))) {
        uint32_t mask = PCI_HEADER_TYPE_MULTI_FUNCTION;

        if (len == 4) {
            mask <<= 16;
        }

        if (pdev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
            val |= mask;
        } else {
            val &= ~mask;
        }
    }

    DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, len=0x%x) %x\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, len, val);

    return val;
}

static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint32_t val_le = cpu_to_le32(val);

    DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, 0x%x, len=0x%x)\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->fd, &val_le, len, vdev->config_offset + addr) != len) {
        error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x, 0x%x) failed: %m\n",
                     __func__, vdev->host.domain, vdev->host.bus,
                     vdev->host.slot, vdev->host.function, addr, val, len);
    }

    /* Write standard header bits to emulation */
    if (addr < PCI_CONFIG_HEADER_SIZE) {
        pci_default_write_config(pdev, addr, val, len);
        return;
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_enable_msi(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_disable_msi(vdev);
        }
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
        ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_enable_msix(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_disable_msix(vdev);
        }
    }
}

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        DPRINTF("VFIO_UNMAP_DMA: %d\n", -errno);
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    DPRINTF("VFIO_MAP_DMA: %d\n", -errno);
    return -errno;
}

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return !memory_region_is_ram(section->mr);
}

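/* Map newly added guest RAM sections into the container's IOMMU domain */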
static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            iommu_data.listener);
    hwaddr iova, end;
    void *vaddr;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        DPRINTF("vfio: SKIPPING region_add %"HWADDR_PRIx" - %"PRIx64"\n",
                section->offset_within_address_space,
                section->offset_within_address_space + section->size - 1);
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region\n", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space + section->size) &
          TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    DPRINTF("vfio: region_add %"HWADDR_PRIx" - %"HWADDR_PRIx" [%p]\n",
            iova, end - 1, vaddr);

    ret = vfio_dma_map(container, iova, end - iova, vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)\n",
                     container, iova, end - iova, vaddr, ret);
    }
}

static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            iommu_data.listener);
    hwaddr iova, end;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        DPRINTF("vfio: SKIPPING region_del %"HWADDR_PRIx" - %"PRIx64"\n",
                section->offset_within_address_space,
                section->offset_within_address_space + section->size - 1);
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region\n", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space + section->size) &
          TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    DPRINTF("vfio: region_del %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            iova, end - 1);

    ret = vfio_dma_unmap(container, iova, end - iova);
    if (ret) {
        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx") = %d (%m)\n",
                     container, iova, end - iova, ret);
    }
}

static MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->iommu_data.listener);
}

/*
 * Interrupt setup
 */
static void vfio_disable_interrupts(VFIODevice *vdev)
{
    switch (vdev->interrupt) {
    case VFIO_INT_INTx:
        vfio_disable_intx(vdev);
        break;
    case VFIO_INT_MSI:
        vfio_disable_msi(vdev);
        break;
    case VFIO_INT_MSIX:
        vfio_disable_msix(vdev);
        break;
    }
}

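/*
 * Parse the physical device's MSI capability and register a matching
 * emulated capability with QEMU's MSI core.
 */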
static int vfio_setup_msi(VFIODevice *vdev, int pos)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;

    if (pread(vdev->fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    DPRINTF("%04x:%02x:%02x.%x PCI MSI CAP @0x%x\n", vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msi_init failed\n");
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}

/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to setup MSI-X we need a
 * MemoryRegion for the BAR.  In order to setup the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives.  So we
 * unfortunately split MSI-X setup across two functions.
 */
static int vfio_early_setup_msix(VFIODevice *vdev)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return 0;
    }

    if (pread(vdev->fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }

    if (pread(vdev->fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        return -errno;
    }

    if (pread(vdev->fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        return -errno;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    vdev->msix = g_malloc0(sizeof(*(vdev->msix)));
    vdev->msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    DPRINTF("%04x:%02x:%02x.%x "
            "PCI MSI-X CAP @0x%x, BAR %d, offset 0x%x, entries %d\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, pos, vdev->msix->table_bar,
            vdev->msix->table_offset, vdev->msix->entries);

    return 0;
}

static int vfio_setup_msix(VFIODevice *vdev, int pos)
{
    int ret;

    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    &vdev->bars[vdev->msix->table_bar].mem,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    &vdev->bars[vdev->msix->pba_bar].mem,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msix_init failed\n");
        return ret;
    }

    return 0;
}

static void vfio_teardown_msi(VFIODevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev, &vdev->bars[vdev->msix->table_bar].mem,
                    &vdev->bars[vdev->msix->pba_bar].mem);
    }
}

/*
 * Resource setup
 */
static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        VFIOBAR *bar = &vdev->bars[i];

        if (!bar->size) {
            continue;
        }

        memory_region_set_enabled(&bar->mmap_mem, enabled);
        if (vdev->msix && vdev->msix->table_bar == i) {
            memory_region_set_enabled(&vdev->msix->mmap_mem, enabled);
        }
    }
}

static void vfio_unmap_bar(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    if (!bar->size) {
        return;
    }

    memory_region_del_subregion(&bar->mem, &bar->mmap_mem);
    munmap(bar->mmap, memory_region_size(&bar->mmap_mem));

    if (vdev->msix && vdev->msix->table_bar == nr) {
        memory_region_del_subregion(&bar->mem, &vdev->msix->mmap_mem);
        munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem));
    }

    memory_region_destroy(&bar->mem);
}

static int vfio_mmap_bar(VFIOBAR *bar, MemoryRegion *mem, MemoryRegion *submem,
                         void **map, size_t size, off_t offset,
                         const char *name)
{
    int ret = 0;

    if (size && bar->flags & VFIO_REGION_INFO_FLAG_MMAP) {
        int prot = 0;

        if (bar->flags & VFIO_REGION_INFO_FLAG_READ) {
            prot |= PROT_READ;
        }

        if (bar->flags & VFIO_REGION_INFO_FLAG_WRITE) {
            prot |= PROT_WRITE;
        }

        *map = mmap(NULL, size, prot, MAP_SHARED,
                    bar->fd, bar->fd_offset + offset);
        if (*map == MAP_FAILED) {
            *map = NULL;
            ret = -errno;
            goto empty_region;
        }

        memory_region_init_ram_ptr(submem, name, size, *map);
    } else {
empty_region:
        /* Create a zero sized sub-region to make cleanup easy. */
        memory_region_init(submem, name, 0);
    }

    memory_region_add_subregion(mem, offset, submem);

    return ret;
}

static void vfio_map_bar(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    unsigned size = bar->size;
    char name[64];
    uint32_t pci_bar;
    uint8_t type;
    int ret;

    /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
    if (!size) {
        return;
    }

    snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function, nr);

    /* Determine what type of BAR this is for registration */
    ret = pread(vdev->fd, &pci_bar, sizeof(pci_bar),
                vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
    if (ret != sizeof(pci_bar)) {
        error_report("vfio: Failed to read BAR %d (%m)\n", nr);
        return;
    }

    pci_bar = le32_to_cpu(pci_bar);
    type = pci_bar & (pci_bar & PCI_BASE_ADDRESS_SPACE_IO ?
           ~PCI_BASE_ADDRESS_IO_MASK : ~PCI_BASE_ADDRESS_MEM_MASK);

    /* A "slow" read/write mapping underlies all BARs */
    memory_region_init_io(&bar->mem, &vfio_bar_ops, bar, name, size);
    pci_register_bar(&vdev->pdev, nr, type, &bar->mem);

    /*
     * We can't mmap areas overlapping the MSIX vector table, so we
     * potentially insert a direct-mapped subregion before and after it.
     */
    if (vdev->msix && vdev->msix->table_bar == nr) {
        size = vdev->msix->table_offset & TARGET_PAGE_MASK;
    }

    strncat(name, " mmap", sizeof(name) - strlen(name) - 1);
    if (vfio_mmap_bar(bar, &bar->mem,
                      &bar->mmap_mem, &bar->mmap, size, 0, name)) {
        error_report("%s unsupported. Performance may be slow\n", name);
    }

    if (vdev->msix && vdev->msix->table_bar == nr) {
        unsigned start;

        start = TARGET_PAGE_ALIGN(vdev->msix->table_offset +
                                  (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

        size = start < bar->size ? bar->size - start : 0;
        strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1);
        /* VFIOMSIXInfo contains another MemoryRegion for this mapping */
        if (vfio_mmap_bar(bar, &bar->mem, &vdev->msix->mmap_mem,
                          &vdev->msix->mmap, size, start, name)) {
            error_report("%s unsupported. Performance may be slow\n", name);
        }
    }
}

static void vfio_map_bars(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_map_bar(vdev, i);
    }
}

static void vfio_unmap_bars(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_unmap_bar(vdev, i);
    }
}

/*
 * General setup
 */
static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
{
    uint8_t tmp, next = 0xff;

    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
         tmp = pdev->config[tmp + 1]) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

static int vfio_add_std_cap(VFIODevice *vdev, uint8_t pos)
{
    PCIDevice *pdev = &vdev->pdev;
    uint8_t cap_id, next, size;
    int ret;

    cap_id = pdev->config[pos];
    next = pdev->config[pos + 1];

    /*
     * If it becomes important to configure capabilities to their actual
     * size, use this as the default when it's something we don't recognize.
     * Since QEMU doesn't actually handle many of the config accesses,
     * exact size doesn't seem worthwhile.
     */
    size = vfio_std_cap_max_size(pdev, pos);

    /*
     * pci_add_capability always inserts the new capability at the head
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we insert from the end by making this recursive.
     * This is also why we pre-calculate size above as cached config space
     * will be changed as we unwind the stack.
     */
    if (next) {
        ret = vfio_add_std_cap(vdev, next);
        if (ret) {
            return ret;
        }
    } else {
        pdev->config[PCI_CAPABILITY_LIST] = 0; /* Begin the rebuild */
    }

    switch (cap_id) {
    case PCI_CAP_ID_MSI:
        ret = vfio_setup_msi(vdev, pos);
        break;
    case PCI_CAP_ID_MSIX:
        ret = vfio_setup_msix(vdev, pos);
        break;
    default:
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    }

    if (ret < 0) {
        error_report("vfio: %04x:%02x:%02x.%x Error adding PCI capability "
                     "0x%x[0x%x]@0x%x: %d\n", vdev->host.domain,
                     vdev->host.bus, vdev->host.slot, vdev->host.function,
                     cap_id, size, pos, ret);
        return ret;
    }

    return 0;
}

static int vfio_add_capabilities(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
        !pdev->config[PCI_CAPABILITY_LIST]) {
        return 0; /* Nothing to add */
    }

    return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
}

static int vfio_load_rom(VFIODevice *vdev)
{
    uint64_t size = vdev->rom_size;
    char name[32];
    off_t off = 0, voff = vdev->rom_offset;
    ssize_t bytes;
    void *ptr;

    /* If loading ROM from file, pci handles it */
    if (vdev->pdev.romfile || !vdev->pdev.rom_bar || !size) {
        return 0;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    snprintf(name, sizeof(name), "vfio[%04x:%02x:%02x.%x].rom",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);
    memory_region_init_ram(&vdev->pdev.rom, name, size);
    ptr = memory_region_get_ram_ptr(&vdev->pdev.rom);
    memset(ptr, 0xff, size);

    while (size) {
        bytes = pread(vdev->fd, ptr + off, size, voff + off);
        if (bytes == 0) {
            break; /* expect that we could get back less than the ROM BAR */
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m\n");
            memory_region_destroy(&vdev->pdev.rom);
            return -errno;
        }
    }

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT, 0, &vdev->pdev.rom);
    vdev->pdev.has_rom = true;
    return 0;
}

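/*
 * Attach the group to an existing container if the kernel accepts it,
 * otherwise open a new /dev/vfio/vfio container and initialize the type1
 * IOMMU backend for it.
 */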
static int vfio_connect_container(VFIOGroup *group) |
1620 |
{ |
1621 |
VFIOContainer *container; |
1622 |
int ret, fd;
|
1623 |
|
1624 |
if (group->container) {
|
1625 |
return 0; |
1626 |
} |
1627 |
|
1628 |
QLIST_FOREACH(container, &container_list, next) { |
1629 |
if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
|
1630 |
group->container = container; |
1631 |
QLIST_INSERT_HEAD(&container->group_list, group, container_next); |
1632 |
return 0; |
1633 |
} |
1634 |
} |
1635 |
|
1636 |
fd = qemu_open("/dev/vfio/vfio", O_RDWR);
|
1637 |
if (fd < 0) { |
1638 |
error_report("vfio: failed to open /dev/vfio/vfio: %m\n");
|
1639 |
return -errno;
|
1640 |
} |
1641 |
|
1642 |
ret = ioctl(fd, VFIO_GET_API_VERSION); |
1643 |
if (ret != VFIO_API_VERSION) {
|
1644 |
error_report("vfio: supported vfio version: %d, "
|
1645 |
"reported version: %d\n", VFIO_API_VERSION, ret);
|
1646 |
close(fd); |
1647 |
return -EINVAL;
|
1648 |
} |
1649 |
|
1650 |
container = g_malloc0(sizeof(*container));
|
1651 |
container->fd = fd; |
1652 |
|
1653 |
if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
|
1654 |
ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd); |
1655 |
if (ret) {
|
1656 |
error_report("vfio: failed to set group container: %m\n");
|
1657 |
g_free(container); |
1658 |
close(fd); |
1659 |
return -errno;
|
1660 |
} |
1661 |
|
1662 |
ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU); |
1663 |
if (ret) {
|
1664 |
error_report("vfio: failed to set iommu for container: %m\n");
|
1665 |
g_free(container); |
1666 |
close(fd); |
1667 |
return -errno;
|
1668 |
} |
1669 |
|
1670 |
container->iommu_data.listener = vfio_memory_listener; |
1671 |
container->iommu_data.release = vfio_listener_release; |
1672 |
|
1673 |
memory_listener_register(&container->iommu_data.listener, &address_space_memory); |
1674 |
} else {
|
1675 |
error_report("vfio: No available IOMMU models\n");
|
1676 |
g_free(container); |
1677 |
close(fd); |
1678 |
return -EINVAL;
|
1679 |
} |
1680 |
|
1681 |
QLIST_INIT(&container->group_list); |
1682 |
QLIST_INSERT_HEAD(&container_list, container, next); |
1683 |
|
1684 |
group->container = container; |
1685 |
QLIST_INSERT_HEAD(&container->group_list, group, container_next); |
1686 |
|
1687 |
return 0; |
1688 |
} |
1689 |
|
static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container\n",
                     group->groupid);
    }

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    if (QLIST_EMPTY(&container->group_list)) {
        if (container->iommu_data.release) {
            container->iommu_data.release(container);
        }
        QLIST_REMOVE(container, next);
        DPRINTF("vfio_disconnect_container: close container->fd\n");
        close(container->fd);
        g_free(container);
    }
}

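/*
 * Look up or create the VFIOGroup for an IOMMU group number: open
 * /dev/vfio/<groupid>, check that the group is viable (all devices in the
 * group bound to their vfio bus driver) and connect it to a container.
 */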
static VFIOGroup *vfio_get_group(int groupid)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &group_list, next) {
        if (group->groupid == groupid) {
            return group;
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_report("vfio: error opening %s: %m\n", path);
        g_free(group);
        return NULL;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_report("vfio: error getting group status: %m\n");
        close(group->fd);
        g_free(group);
        return NULL;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_report("vfio: error, group %d is not viable, please ensure "
                     "all devices within the iommu_group are bound to their "
                     "vfio bus driver.\n", groupid);
        close(group->fd);
        g_free(group);
        return NULL;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group)) {
        error_report("vfio: failed to setup container for group %d\n", groupid);
        close(group->fd);
        g_free(group);
        return NULL;
    }

    QLIST_INSERT_HEAD(&group_list, group, next);

    return group;
}

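/*
 * Release a group once it no longer has any devices: disconnect it from the
 * container, remove it from the group list and close its fd.
 */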
static void vfio_put_group(VFIOGroup *group)
{
    if (!QLIST_EMPTY(&group->device_list)) {
        return;
    }

    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    DPRINTF("vfio_put_group: close group->fd\n");
    close(group->fd);
    g_free(group);
}

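/*
 * Acquire the device fd for "name" from its group, sanity check the device
 * info, then cache the size, offset and flags of each BAR, the option ROM
 * and the config space region within that fd.
 */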
static int vfio_get_device(VFIOGroup *group, const char *name, VFIODevice *vdev)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
    int ret, i;

    ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (ret < 0) {
        error_report("vfio: error getting device %s from group %d: %m\n",
                     name, group->groupid);
        error_report("Verify all devices in group %d are bound to vfio-pci "
                     "or pci-stub and not already in use\n", group->groupid);
        return ret;
    }

    vdev->fd = ret;
    vdev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vdev, next);

    /* Sanity check device */
    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_report("vfio: error getting device info: %m\n");
        goto error;
    }

    DPRINTF("Device %s flags: %u, regions: %u, irqs: %u\n", name,
            dev_info.flags, dev_info.num_regions, dev_info.num_irqs);

    if (!(dev_info.flags & VFIO_DEVICE_FLAGS_PCI)) {
        error_report("vfio: Um, this isn't a PCI device\n");
        goto error;
    }

    vdev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    if (!vdev->reset_works) {
        error_report("Warning, device %s does not support reset\n", name);
    }

    if (dev_info.num_regions != VFIO_PCI_NUM_REGIONS) {
        error_report("vfio: unexpected number of io regions %u\n",
                     dev_info.num_regions);
        goto error;
    }

    if (dev_info.num_irqs != VFIO_PCI_NUM_IRQS) {
        error_report("vfio: unexpected number of irqs %u\n", dev_info.num_irqs);
        goto error;
    }

    for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
        reg_info.index = i;

        ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
        if (ret) {
            error_report("vfio: Error getting region %d info: %m\n", i);
            goto error;
        }

        DPRINTF("Device %s region %d:\n", name, i);
        DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
                (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
                (unsigned long)reg_info.flags);

        vdev->bars[i].flags = reg_info.flags;
        vdev->bars[i].size = reg_info.size;
        vdev->bars[i].fd_offset = reg_info.offset;
        vdev->bars[i].fd = vdev->fd;
        vdev->bars[i].nr = i;
    }

    reg_info.index = VFIO_PCI_ROM_REGION_INDEX;

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
    if (ret) {
        error_report("vfio: Error getting ROM info: %m\n");
        goto error;
    }

    DPRINTF("Device %s ROM:\n", name);
    DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
            (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
            (unsigned long)reg_info.flags);

    vdev->rom_size = reg_info.size;
    vdev->rom_offset = reg_info.offset;

    reg_info.index = VFIO_PCI_CONFIG_REGION_INDEX;

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
    if (ret) {
        error_report("vfio: Error getting config info: %m\n");
        goto error;
    }

    DPRINTF("Device %s config:\n", name);
    DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
            (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
            (unsigned long)reg_info.flags);

    vdev->config_size = reg_info.size;
    vdev->config_offset = reg_info.offset;

error:
    if (ret) {
        QLIST_REMOVE(vdev, next);
        vdev->group = NULL;
        close(vdev->fd);
    }
    return ret;
}

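/*
 * Drop a device: unlink it from its group, close the device fd and free any
 * MSI-X state allocated at setup time.
 */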
static void vfio_put_device(VFIODevice *vdev)
{
    QLIST_REMOVE(vdev, next);
    vdev->group = NULL;
    DPRINTF("vfio_put_device: close vdev->fd\n");
    close(vdev->fd);
    if (vdev->msix) {
        g_free(vdev->msix);
        vdev->msix = NULL;
    }
}

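/*
 * PCIDevice init function: resolve the host device's iommu_group from sysfs,
 * acquire the group and device fds, snapshot config space, then set up the
 * option ROM, MSI-X, BARs, capabilities and INTx routing.
 */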
static int vfio_initfn(PCIDevice *pdev)
{
    VFIODevice *pvdev, *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOGroup *group;
    char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

    /* Check that the host device exists */
    snprintf(path, sizeof(path),
             "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);
    if (stat(path, &st) < 0) {
        error_report("vfio: error: no such host device: %s\n", path);
        return -errno;
    }

    strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1);

    len = readlink(path, iommu_group_path, PATH_MAX);
    if (len <= 0) {
        error_report("vfio: error no iommu_group for device\n");
        return -errno;
    }

    iommu_group_path[len] = 0;
    group_name = basename(iommu_group_path);

    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_report("vfio: error reading %s: %m\n", path);
        return -errno;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) group %d\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, groupid);

    group = vfio_get_group(groupid);
    if (!group) {
        error_report("vfio: failed to get group %d\n", groupid);
        return -ENOENT;
    }

    snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);

    QLIST_FOREACH(pvdev, &group->device_list, next) {
        if (pvdev->host.domain == vdev->host.domain &&
            pvdev->host.bus == vdev->host.bus &&
            pvdev->host.slot == vdev->host.slot &&
            pvdev->host.function == vdev->host.function) {

            error_report("vfio: error: device %s is already attached\n", path);
            vfio_put_group(group);
            return -EBUSY;
        }
    }

    ret = vfio_get_device(group, path, vdev);
    if (ret) {
        error_report("vfio: failed to get device %s\n", path);
        vfio_put_group(group);
        return ret;
    }

    /* Get a copy of config space */
    ret = pread(vdev->fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_report("vfio: Failed to read device config space\n");
        goto out_put;
    }

    /*
     * Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here.
     */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);

    vfio_load_rom(vdev);

    ret = vfio_early_setup_msix(vdev);
    if (ret) {
        goto out_put;
    }

    vfio_map_bars(vdev);

    ret = vfio_add_capabilities(vdev);
    if (ret) {
        goto out_teardown;
    }

    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = qemu_new_timer_ms(vm_clock,
                                                  vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq);
        ret = vfio_enable_intx(vdev);
        if (ret) {
            goto out_teardown;
        }
    }

    return 0;

out_teardown:
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_teardown_msi(vdev);
    vfio_unmap_bars(vdev);
out_put:
    vfio_put_device(vdev);
    vfio_put_group(group);
    return ret;
}

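/*
 * PCIDevice exit function: quiesce interrupts, free the INTx mmap timer and
 * MSI state, unmap the BARs and release the device and group references
 * taken in vfio_initfn().
 */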
static void vfio_exitfn(PCIDevice *pdev)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOGroup *group = vdev->group;

    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_disable_interrupts(vdev);
    if (vdev->intx.mmap_timer) {
        qemu_free_timer(vdev->intx.mmap_timer);
    }
    vfio_teardown_msi(vdev);
    vfio_unmap_bars(vdev);
    vfio_put_device(vdev);
    vfio_put_group(group);
}

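/*
 * Device reset: stop DMA and interrupts, attempt a physical reset through
 * VFIO_DEVICE_RESET when the kernel reports reset support, then re-enable
 * INTx for the freshly reset guest.
 */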
static void vfio_pci_reset(DeviceState *dev)
{
    PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint16_t cmd;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    vfio_disable_interrupts(vdev);

    /*
     * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
     * Also put INTx Disable in a known state.
     */
    cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
    cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
             PCI_COMMAND_INTX_DISABLE);
    vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);

    if (vdev->reset_works) {
        if (ioctl(vdev->fd, VFIO_DEVICE_RESET)) {
            error_report("vfio: Error unable to reset physical device "
                         "(%04x:%02x:%02x.%x): %m\n", vdev->host.domain,
                         vdev->host.bus, vdev->host.slot, vdev->host.function);
        }
    }

    vfio_enable_intx(vdev);
}

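/*
 * qdev properties.  "host" selects the assigned device by PCI address, e.g.
 * "-device vfio-pci,host=0000:06:0d.0"; "x-intx-mmap-timeout-ms" tunes the
 * delay before BAR mmaps are re-enabled after an INTx interrupt.
 */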
static Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIODevice, host),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIODevice,
                       intx.mmap_timeout, 1100),
    /*
     * TODO - support passed fds... is this necessary?
     * DEFINE_PROP_STRING("vfiofd", VFIODevice, vfiofd_name),
     * DEFINE_PROP_STRING("vfiogroupfd", VFIODevice, vfiogroupfd_name),
     */
    DEFINE_PROP_END_OF_LIST(),
};

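/* The device carries host state that cannot be saved, so block migration. */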
static const VMStateDescription vfio_pci_vmstate = {
    .name = "vfio-pci",
    .unmigratable = 1,
};

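/* Wire up the qdev/PCI class callbacks and register the "vfio-pci" type. */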
static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    dc->reset = vfio_pci_reset;
    dc->props = vfio_pci_dev_properties;
    dc->vmsd = &vfio_pci_vmstate;
    dc->desc = "VFIO-based PCI device assignment";
    pdc->init = vfio_initfn;
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
}

static const TypeInfo vfio_pci_dev_info = {
    .name = "vfio-pci",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VFIODevice),
    .class_init = vfio_pci_dev_class_init,
};

static void register_vfio_pci_dev_type(void)
{
    type_register_static(&vfio_pci_dev_info);
}

type_init(register_vfio_pci_dev_type)