Statistics
| Branch: | Revision:

root / hw / kvm / pci-assign.c @ 2b199f93

History | View | Annotate | Download (62.1 kB)

1
/*
2
 * Copyright (c) 2007, Neocleus Corporation.
3
 *
4
 * This work is licensed under the terms of the GNU GPL, version 2.  See
5
 * the COPYING file in the top-level directory.
6
 *
7
 *
8
 *  Assign a PCI device from the host to a guest VM.
9
 *
10
 *  This implementation uses the classic device assignment interface of KVM
11
 *  and is only available on x86 hosts. It is expected to be obsoleted by VFIO
12
 *  based device assignment.
13
 *
14
 *  Adapted for KVM (qemu-kvm) by Qumranet. QEMU version was based on qemu-kvm
15
 *  revision 4144fe9d48. See its repository for the history.
16
 *
17
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
18
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
19
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
20
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
21
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
22
 */
23
#include <stdio.h>
24
#include <unistd.h>
25
#include <sys/io.h>
26
#include <sys/mman.h>
27
#include <sys/types.h>
28
#include <sys/stat.h>
29
#include "hw/hw.h"
30
#include "hw/pc.h"
31
#include "qemu-error.h"
32
#include "console.h"
33
#include "hw/loader.h"
34
#include "monitor.h"
35
#include "range.h"
36
#include "sysemu.h"
37
#include "hw/pci.h"
38
#include "hw/msi.h"
39
#include "kvm_i386.h"
40

    
41
#define MSIX_PAGE_SIZE 0x1000
42

    
43
/* From linux/ioport.h */
44
#define IORESOURCE_IO       0x00000100  /* Resource type */
45
#define IORESOURCE_MEM      0x00000200
46
#define IORESOURCE_IRQ      0x00000400
47
#define IORESOURCE_DMA      0x00000800
48
#define IORESOURCE_PREFETCH 0x00002000  /* No side effects */
49

    
50
//#define DEVICE_ASSIGNMENT_DEBUG
51

    
52
#ifdef DEVICE_ASSIGNMENT_DEBUG
53
#define DEBUG(fmt, ...)                                       \
54
    do {                                                      \
55
        fprintf(stderr, "%s: " fmt, __func__ , __VA_ARGS__);  \
56
    } while (0)
57
#else
58
#define DEBUG(fmt, ...)
59
#endif
60

    
61
/* Host-side view of one PCI BAR of the real device, as read from sysfs. */
typedef struct PCIRegion {
    int type;           /* Memory or port I/O */
    int valid;          /* non-zero once the region was parsed and opened */
    uint64_t base_addr; /* host physical base address of the BAR */
    uint64_t size;    /* size of the region */
    int resource_fd;    /* fd of the sysfs resource<N> file, -1 if unopened */
} PCIRegion;
68

    
69
/* Aggregate of all host resources of the assigned device. */
typedef struct PCIDevRegions {
    uint8_t bus, dev, func; /* Bus inside domain, device and function */
    int irq;                /* IRQ number */
    uint16_t region_number; /* number of active regions */

    /* Port I/O or MMIO Regions */
    PCIRegion regions[PCI_NUM_REGIONS - 1];
    int config_fd;          /* fd used to access the device's config space */
} PCIDevRegions;
78

    
79
/* Guest-visible mapping of one host BAR. */
typedef struct AssignedDevRegion {
    MemoryRegion container;  /* outer region registered as the BAR */
    MemoryRegion real_iomem; /* inner region backed by the host resource */
    union {
        uint8_t *r_virtbase; /* mmapped access address for memory regions */
        uint32_t r_baseport; /* the base guest port for I/O regions */
    } u;
    pcibus_t e_size;    /* emulated size of region in bytes */
    pcibus_t r_size;    /* real size of region in bytes */
    PCIRegion *region;  /* backing host region descriptor */
} AssignedDevRegion;
90

    
91
#define ASSIGNED_DEVICE_PREFER_MSI_BIT  0
92
#define ASSIGNED_DEVICE_SHARE_INTX_BIT  1
93

    
94
#define ASSIGNED_DEVICE_PREFER_MSI_MASK (1 << ASSIGNED_DEVICE_PREFER_MSI_BIT)
95
#define ASSIGNED_DEVICE_SHARE_INTX_MASK (1 << ASSIGNED_DEVICE_SHARE_INTX_BIT)
96

    
97
/* One MSI-X table entry, laid out as in the PCI spec (4 little dwords). */
typedef struct MSIXTableEntry {
    uint32_t addr_lo;
    uint32_t addr_hi;
    uint32_t data;
    uint32_t ctrl;
} MSIXTableEntry;
103

    
104
/* Which interrupt delivery mode is currently assigned for the device. */
typedef enum AssignedIRQType {
    ASSIGNED_IRQ_NONE = 0,
    ASSIGNED_IRQ_INTX_HOST_INTX, /* guest INTx backed by host INTx */
    ASSIGNED_IRQ_INTX_HOST_MSI,  /* guest INTx backed by host MSI */
    ASSIGNED_IRQ_MSI,
    ASSIGNED_IRQ_MSIX
} AssignedIRQType;
111

    
112
/* Per-instance state of a KVM-assigned host PCI device. */
typedef struct AssignedDevice {
    PCIDevice dev;                 /* must be first: upcast via DO_UPCAST */
    PCIHostDeviceAddress host;     /* host domain:bus:slot.function */
    uint32_t dev_id;               /* id handed back by kvm_device_pci_assign */
    uint32_t features;             /* ASSIGNED_DEVICE_*_MASK flag bits */
    int intpin;                    /* INTx pin used for IRQ routing lookups */
    AssignedDevRegion v_addrs[PCI_NUM_REGIONS - 1];
    PCIDevRegions real_device;
    PCIINTxRoute intx_route;       /* last INTx route programmed into KVM */
    AssignedIRQType assigned_irq_type;
    struct {
#define ASSIGNED_DEVICE_CAP_MSI (1 << 0)
#define ASSIGNED_DEVICE_CAP_MSIX (1 << 1)
        uint32_t available;        /* capabilities the real device offers */
#define ASSIGNED_DEVICE_MSI_ENABLED (1 << 0)
#define ASSIGNED_DEVICE_MSIX_ENABLED (1 << 1)
#define ASSIGNED_DEVICE_MSIX_MASKED (1 << 2)
        uint32_t state;            /* which of those are currently active */
    } cap;
    /* Per-byte config space dispatch: 0xff selects emulation, 0 passthrough
     * (see assigned_dev_emulate_config_read/assigned_dev_direct_config_*). */
    uint8_t emulate_config_read[PCI_CONFIG_SPACE_SIZE];
    uint8_t emulate_config_write[PCI_CONFIG_SPACE_SIZE];
    int msi_virq_nr;               /* number of entries in msi_virq */
    int *msi_virq;                 /* KVM irq routing entries, -1 == unused */
    MSIXTableEntry *msix_table;    /* shadow copy of the MSI-X table page */
    hwaddr msix_table_addr;        /* host physical address of the table */
    uint16_t msix_max;             /* number of MSI-X vectors */
    MemoryRegion mmio;             /* MSI-X table MMIO overlay region */
    char *configfd_name;           /* optional monitor fd name for config */
    int32_t bootindex;
} AssignedDevice;
142

    
143
static void assigned_dev_update_irq_routing(PCIDevice *dev);
144

    
145
static void assigned_dev_load_option_rom(AssignedDevice *dev);
146

    
147
static void assigned_dev_unregister_msix_mmio(AssignedDevice *dev);
148

    
149
static uint64_t assigned_dev_ioport_rw(AssignedDevRegion *dev_region,
150
                                       hwaddr addr, int size,
151
                                       uint64_t *data)
152
{
153
    uint64_t val = 0;
154
    int fd = dev_region->region->resource_fd;
155

    
156
    if (fd >= 0) {
157
        if (data) {
158
            DEBUG("pwrite data=%" PRIx64 ", size=%d, e_phys=" TARGET_FMT_plx
159
                  ", addr="TARGET_FMT_plx"\n", *data, size, addr, addr);
160
            if (pwrite(fd, data, size, addr) != size) {
161
                error_report("%s - pwrite failed %s",
162
                             __func__, strerror(errno));
163
            }
164
        } else {
165
            if (pread(fd, &val, size, addr) != size) {
166
                error_report("%s - pread failed %s",
167
                             __func__, strerror(errno));
168
                val = (1UL << (size * 8)) - 1;
169
            }
170
            DEBUG("pread val=%" PRIx64 ", size=%d, e_phys=" TARGET_FMT_plx
171
                  ", addr=" TARGET_FMT_plx "\n", val, size, addr, addr);
172
        }
173
    } else {
174
        uint32_t port = addr + dev_region->u.r_baseport;
175

    
176
        if (data) {
177
            DEBUG("out data=%" PRIx64 ", size=%d, e_phys=" TARGET_FMT_plx
178
                  ", host=%x\n", *data, size, addr, port);
179
            switch (size) {
180
            case 1:
181
                outb(*data, port);
182
                break;
183
            case 2:
184
                outw(*data, port);
185
                break;
186
            case 4:
187
                outl(*data, port);
188
                break;
189
            }
190
        } else {
191
            switch (size) {
192
            case 1:
193
                val = inb(port);
194
                break;
195
            case 2:
196
                val = inw(port);
197
                break;
198
            case 4:
199
                val = inl(port);
200
                break;
201
            }
202
            DEBUG("in data=%" PRIx64 ", size=%d, e_phys=" TARGET_FMT_plx
203
                  ", host=%x\n", val, size, addr, port);
204
        }
205
    }
206
    return val;
207
}
208

    
209
/* MemoryRegionOps write callback: forward to the common port-I/O helper.
 * Passing a non-NULL data pointer selects the write path. */
static void assigned_dev_ioport_write(void *opaque, hwaddr addr,
                                      uint64_t data, unsigned size)
{
    uint64_t value = data;

    assigned_dev_ioport_rw(opaque, addr, size, &value);
}
214

    
215
static uint64_t assigned_dev_ioport_read(void *opaque,
216
                                         hwaddr addr, unsigned size)
217
{
218
    return assigned_dev_ioport_rw(opaque, addr, size, NULL);
219
}
220

    
221
/* Byte read from a "slow" (non-page-aligned) mmapped BAR.
 * Fix: the DEBUG message said "slow_bar_readl" with %08x — a copy-paste
 * from the 32-bit variant; report the right function and byte width. */
static uint32_t slow_bar_readb(void *opaque, hwaddr addr)
{
    AssignedDevRegion *d = opaque;
    uint8_t *in = d->u.r_virtbase + addr;
    uint32_t r;

    r = *in;
    DEBUG("slow_bar_readb addr=0x" TARGET_FMT_plx " val=0x%02x\n", addr, r);

    return r;
}
232

    
233
/* 16-bit read from a "slow" (non-page-aligned) mmapped BAR.
 * Fix: the DEBUG message said "slow_bar_readl" with %08x — a copy-paste
 * from the 32-bit variant; report the right function and 16-bit width. */
static uint32_t slow_bar_readw(void *opaque, hwaddr addr)
{
    AssignedDevRegion *d = opaque;
    uint16_t *in = (uint16_t *)(d->u.r_virtbase + addr);
    uint32_t r;

    r = *in;
    DEBUG("slow_bar_readw addr=0x" TARGET_FMT_plx " val=0x%04x\n", addr, r);

    return r;
}
244

    
245
/* 32-bit read from a "slow" (non-page-aligned) mmapped BAR. */
static uint32_t slow_bar_readl(void *opaque, hwaddr addr)
{
    AssignedDevRegion *region = opaque;
    uint32_t *p = (uint32_t *)(region->u.r_virtbase + addr);
    uint32_t val = *p;

    DEBUG("slow_bar_readl addr=0x" TARGET_FMT_plx " val=0x%08x\n", addr, val);

    return val;
}
256

    
257
/* Byte write to a "slow" (non-page-aligned) mmapped BAR. */
static void slow_bar_writeb(void *opaque, hwaddr addr, uint32_t val)
{
    AssignedDevRegion *region = opaque;
    uint8_t *p = region->u.r_virtbase + addr;

    DEBUG("slow_bar_writeb addr=0x" TARGET_FMT_plx " val=0x%02x\n", addr, val);
    *p = val;
}
265

    
266
/* 16-bit write to a "slow" (non-page-aligned) mmapped BAR. */
static void slow_bar_writew(void *opaque, hwaddr addr, uint32_t val)
{
    AssignedDevRegion *region = opaque;
    uint16_t *p = (uint16_t *)(region->u.r_virtbase + addr);

    DEBUG("slow_bar_writew addr=0x" TARGET_FMT_plx " val=0x%04x\n", addr, val);
    *p = val;
}
274

    
275
/* 32-bit write to a "slow" (non-page-aligned) mmapped BAR. */
static void slow_bar_writel(void *opaque, hwaddr addr, uint32_t val)
{
    AssignedDevRegion *region = opaque;
    uint32_t *p = (uint32_t *)(region->u.r_virtbase + addr);

    DEBUG("slow_bar_writel addr=0x" TARGET_FMT_plx " val=0x%08x\n", addr, val);
    *p = val;
}
283

    
284
/* Access callbacks for BARs that are not page-aligned and therefore
 * cannot be mapped directly into the guest; every access goes through
 * the slow_bar_* helpers above. */
static const MemoryRegionOps slow_bar_ops = {
    .old_mmio = {
        .read = { slow_bar_readb, slow_bar_readw, slow_bar_readl, },
        .write = { slow_bar_writeb, slow_bar_writew, slow_bar_writel, },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};
291

    
292
/*
 * Build the container MemoryRegion for an MMIO BAR: place the real
 * (mmapped or slow) region at offset 0, and if the device's MSI-X table
 * falls inside this BAR, overlay the trapping MSI-X MMIO page on top of
 * it so table writes are intercepted rather than hitting the device.
 */
static void assigned_dev_iomem_setup(PCIDevice *pci_dev, int region_num,
                                     pcibus_t e_size)
{
    AssignedDevice *r_dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    AssignedDevRegion *region = &r_dev->v_addrs[region_num];
    PCIRegion *real_region = &r_dev->real_device.regions[region_num];

    if (e_size > 0) {
        memory_region_init(&region->container, "assigned-dev-container",
                           e_size);
        memory_region_add_subregion(&region->container, 0, &region->real_iomem);

        /* deal with MSI-X MMIO page */
        if (real_region->base_addr <= r_dev->msix_table_addr &&
                real_region->base_addr + real_region->size >
                r_dev->msix_table_addr) {
            uint64_t offset = r_dev->msix_table_addr - real_region->base_addr;

            /* priority 1 so the table page shadows the underlying BAR */
            memory_region_add_subregion_overlap(&region->container,
                                                offset,
                                                &r_dev->mmio,
                                                1);
        }
    }
}
317

    
318
/* Access callbacks for port-I/O BARs; both funnel into
 * assigned_dev_ioport_rw(). */
static const MemoryRegionOps assigned_dev_ioport_ops = {
    .read = assigned_dev_ioport_read,
    .write = assigned_dev_ioport_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
323

    
324
static void assigned_dev_ioport_setup(PCIDevice *pci_dev, int region_num,
325
                                      pcibus_t size)
326
{
327
    AssignedDevice *r_dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
328
    AssignedDevRegion *region = &r_dev->v_addrs[region_num];
329

    
330
    region->e_size = size;
331
    memory_region_init(&region->container, "assigned-dev-container", size);
332
    memory_region_init_io(&region->real_iomem, &assigned_dev_ioport_ops,
333
                          r_dev->v_addrs + region_num,
334
                          "assigned-dev-iomem", size);
335
    memory_region_add_subregion(&region->container, 0, &region->real_iomem);
336
}
337

    
338
/*
 * Read @len bytes (1/2/4) of real config space at offset @pos, retrying
 * on EINTR/EAGAIN and aborting via hw_error() on any other failure.
 *
 * Fix: @val was uninitialized; pread() only fills @len bytes, so for
 * len < 4 the upper bytes of the returned value were indeterminate.
 * Zero-initialize so the unread bytes are well defined.
 */
static uint32_t assigned_dev_pci_read(PCIDevice *d, int pos, int len)
{
    AssignedDevice *pci_dev = DO_UPCAST(AssignedDevice, dev, d);
    uint32_t val = 0;
    ssize_t ret;
    int fd = pci_dev->real_device.config_fd;

again:
    ret = pread(fd, &val, len, pos);
    if (ret != len) {
        if ((ret < 0) && (errno == EINTR || errno == EAGAIN)) {
            goto again;
        }

        hw_error("pci read failed, ret = %zd errno = %d\n", ret, errno);
    }

    return val;
}
357

    
358
/* Convenience wrapper: read one byte of real config space. */
static uint8_t assigned_dev_pci_read_byte(PCIDevice *d, int pos)
{
    uint32_t v = assigned_dev_pci_read(d, pos, 1);

    return (uint8_t)v;
}
362

    
363
/* Write @len bytes (1/2/4) of real config space at offset @pos,
 * retrying on EINTR/EAGAIN and reporting any other failure via
 * hw_error(). */
static void assigned_dev_pci_write(PCIDevice *d, int pos, uint32_t val, int len)
{
    AssignedDevice *adev = DO_UPCAST(AssignedDevice, dev, d);
    int fd = adev->real_device.config_fd;

    for (;;) {
        ssize_t written = pwrite(fd, &val, len, pos);

        if (written == len) {
            return;
        }
        if (written < 0 && (errno == EINTR || errno == EAGAIN)) {
            continue;   /* transient: retry the write */
        }
        hw_error("pci write failed, ret = %zd errno = %d\n", written, errno);
        return;
    }
}
379

    
380
/* Mark config-space bytes [offset, offset+len) as emulated on read:
 * 0xff in the dispatch map selects QEMU's emulated value instead of
 * the real device's. */
static void assigned_dev_emulate_config_read(AssignedDevice *dev,
                                             uint32_t offset, uint32_t len)
{
    memset(dev->emulate_config_read + offset, 0xff, len);
}
385

    
386
/* Mark config-space bytes [offset, offset+len) as passed straight
 * through to the real device on read (0 in the dispatch map). */
static void assigned_dev_direct_config_read(AssignedDevice *dev,
                                            uint32_t offset, uint32_t len)
{
    memset(dev->emulate_config_read + offset, 0, len);
}
391

    
392
/* Mark config-space bytes [offset, offset+len) as passed straight
 * through to the real device on write (0 in the dispatch map). */
static void assigned_dev_direct_config_write(AssignedDevice *dev,
                                             uint32_t offset, uint32_t len)
{
    memset(dev->emulate_config_write + offset, 0, len);
}
397

    
398
/*
 * Walk the real device's standard PCI capability list and return the
 * config-space offset of capability id @cap, or 0 if not found.
 * @start may name a list position to resume from; 0 means begin at
 * PCI_CAPABILITY_LIST.  All reads go to the real device via
 * assigned_dev_pci_read_byte().
 */
static uint8_t pci_find_cap_offset(PCIDevice *d, uint8_t cap, uint8_t start)
{
    int id;
    int max_cap = 48;   /* bound the walk in case of a corrupted list */
    int pos = start ? start : PCI_CAPABILITY_LIST;
    int status;

    status = assigned_dev_pci_read_byte(d, PCI_STATUS);
    if ((status & PCI_STATUS_CAP_LIST) == 0) {
        /* Device advertises no capability list at all. */
        return 0;
    }

    while (max_cap--) {
        pos = assigned_dev_pci_read_byte(d, pos);
        if (pos < 0x40) {
            /* Pointers into the standard header terminate the list. */
            break;
        }

        pos &= ~3;  /* capability pointers are dword-aligned */
        id = assigned_dev_pci_read_byte(d, pos + PCI_CAP_LIST_ID);

        if (id == 0xff) {
            break;
        }
        if (id == cap) {
            return pos;
        }

        pos += PCI_CAP_LIST_NEXT;
    }
    return 0;
}
430

    
431
/*
 * Register every valid host BAR with the guest.
 *
 * MMIO regions are mmapped from their sysfs resource file; page-aligned
 * BARs become direct RAM mappings, unaligned ones get trapping
 * slow_bar_ops.  Port-I/O regions are probed for kernel resource-fd
 * support and otherwise fall back to direct port access (resource fd
 * closed, fd routing disabled).
 *
 * Returns 0 on success, -1 if an mmap fails.
 */
static int assigned_dev_register_regions(PCIRegion *io_regions,
                                         unsigned long regions_num,
                                         AssignedDevice *pci_dev)
{
    uint32_t i;
    PCIRegion *cur_region = io_regions;

    for (i = 0; i < regions_num; i++, cur_region++) {
        if (!cur_region->valid) {
            continue;
        }

        /* handle memory io regions */
        if (cur_region->type & IORESOURCE_MEM) {
            int t = cur_region->type & IORESOURCE_PREFETCH
                ? PCI_BASE_ADDRESS_MEM_PREFETCH
                : PCI_BASE_ADDRESS_SPACE_MEMORY;

            /* map physical memory */
            pci_dev->v_addrs[i].u.r_virtbase = mmap(NULL, cur_region->size,
                                                    PROT_WRITE | PROT_READ,
                                                    MAP_SHARED,
                                                    cur_region->resource_fd,
                                                    (off_t)0);

            if (pci_dev->v_addrs[i].u.r_virtbase == MAP_FAILED) {
                pci_dev->v_addrs[i].u.r_virtbase = NULL;
                error_report("%s: Error: Couldn't mmap 0x%" PRIx64 "!",
                             __func__, cur_region->base_addr);
                return -1;
            }

            pci_dev->v_addrs[i].r_size = cur_region->size;
            pci_dev->v_addrs[i].e_size = 0;

            /* add offset */
            pci_dev->v_addrs[i].u.r_virtbase +=
                (cur_region->base_addr & 0xFFF);

            if (cur_region->size & 0xFFF) {
                /* Unaligned BAR: cannot map as RAM, trap every access. */
                error_report("PCI region %d at address 0x%" PRIx64 " has "
                             "size 0x%" PRIx64 ", which is not a multiple of "
                             "4K.  You might experience some performance hit "
                             "due to that.",
                             i, cur_region->base_addr, cur_region->size);
                memory_region_init_io(&pci_dev->v_addrs[i].real_iomem,
                                      &slow_bar_ops, &pci_dev->v_addrs[i],
                                      "assigned-dev-slow-bar",
                                      cur_region->size);
            } else {
                /* Page-aligned BAR: expose the mapping directly as RAM. */
                void *virtbase = pci_dev->v_addrs[i].u.r_virtbase;
                char name[32];
                snprintf(name, sizeof(name), "%s.bar%d",
                         object_get_typename(OBJECT(pci_dev)), i);
                memory_region_init_ram_ptr(&pci_dev->v_addrs[i].real_iomem,
                                           name, cur_region->size,
                                           virtbase);
                vmstate_register_ram(&pci_dev->v_addrs[i].real_iomem,
                                     &pci_dev->dev.qdev);
            }

            assigned_dev_iomem_setup(&pci_dev->dev, i, cur_region->size);
            pci_register_bar((PCIDevice *) pci_dev, i, t,
                             &pci_dev->v_addrs[i].container);
            continue;
        } else {
            /* handle port io regions */
            uint32_t val;
            int ret;

            /* Test kernel support for ioport resource read/write.  Old
             * kernels return EIO.  New kernels only allow 1/2/4 byte reads
             * so should return EINVAL for a 3 byte read */
            ret = pread(pci_dev->v_addrs[i].region->resource_fd, &val, 3, 0);
            if (ret >= 0) {
                error_report("Unexpected return from I/O port read: %d", ret);
                abort();
            } else if (errno != EINVAL) {
                error_report("Kernel doesn't support ioport resource "
                             "access, hiding this region.");
                close(pci_dev->v_addrs[i].region->resource_fd);
                cur_region->valid = 0;
                continue;
            }

            pci_dev->v_addrs[i].u.r_baseport = cur_region->base_addr;
            pci_dev->v_addrs[i].r_size = cur_region->size;
            pci_dev->v_addrs[i].e_size = 0;

            assigned_dev_ioport_setup(&pci_dev->dev, i, cur_region->size);
            pci_register_bar((PCIDevice *) pci_dev, i,
                             PCI_BASE_ADDRESS_SPACE_IO,
                             &pci_dev->v_addrs[i].container);
        }
    }

    /* success */
    return 0;
}
530

    
531
/*
 * Read a numeric id attribute (e.g. "vendor" or "device") from the
 * device's sysfs directory into *val.
 *
 * Fix: the original returned -1 on a failed fscanf() without closing
 * the stream, leaking the FILE *.  Route both outcomes through a
 * single fclose() before returning.
 *
 * Returns 0 on success, -1 on open or parse failure.
 */
static int get_real_id(const char *devpath, const char *idname, uint16_t *val)
{
    FILE *f;
    char name[128];
    long id;
    int ret = 0;

    snprintf(name, sizeof(name), "%s%s", devpath, idname);
    f = fopen(name, "r");
    if (f == NULL) {
        error_report("%s: %s: %m", __func__, name);
        return -1;
    }
    if (fscanf(f, "%li\n", &id) == 1) {
        *val = id;
    } else {
        ret = -1;
    }
    fclose(f);

    return ret;
}
552

    
553
/* Read the device's vendor ID from its sysfs "vendor" attribute. */
static int get_real_vendor_id(const char *devpath, uint16_t *val)
{
    return get_real_id(devpath, "vendor", val);
}
557

    
558
/* Read the device's device ID from its sysfs "device" attribute. */
static int get_real_device_id(const char *devpath, uint16_t *val)
{
    return get_real_id(devpath, "device", val);
}
562

    
563
/*
 * Discover the real device under /sys/bus/pci/devices/: open (or adopt
 * via the monitor) its config-space fd, snapshot its config space, and
 * parse the "resource" file to populate dev->regions[].  Vendor/device
 * IDs are (re)read from sysfs so they reflect the hardware even when
 * config reads are filtered.
 *
 * Returns 0 on success, 1 on any failure (note: positive, not -errno).
 */
static int get_real_device(AssignedDevice *pci_dev, uint16_t r_seg,
                           uint8_t r_bus, uint8_t r_dev, uint8_t r_func)
{
    char dir[128], name[128];
    int fd, r = 0, v;
    FILE *f;
    uint64_t start, end, size, flags;
    uint16_t id;
    PCIRegion *rp;
    PCIDevRegions *dev = &pci_dev->real_device;

    dev->region_number = 0;

    snprintf(dir, sizeof(dir), "/sys/bus/pci/devices/%04x:%02x:%02x.%x/",
             r_seg, r_bus, r_dev, r_func);

    snprintf(name, sizeof(name), "%sconfig", dir);

    if (pci_dev->configfd_name && *pci_dev->configfd_name) {
        /* The user passed an already-open fd through the monitor. */
        dev->config_fd = monitor_handle_fd_param(cur_mon, pci_dev->configfd_name);
        if (dev->config_fd < 0) {
            return 1;
        }
    } else {
        dev->config_fd = open(name, O_RDWR);

        if (dev->config_fd == -1) {
            error_report("%s: %s: %m", __func__, name);
            return 1;
        }
    }
again:
    r = read(dev->config_fd, pci_dev->dev.config,
             pci_config_size(&pci_dev->dev));
    if (r < 0) {
        if (errno == EINTR || errno == EAGAIN) {
            goto again;
        }
        error_report("%s: read failed, errno = %d", __func__, errno);
    }

    /* Restore or clear multifunction, this is always controlled by qemu */
    if (pci_dev->dev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        pci_dev->dev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    } else {
        pci_dev->dev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /* Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here. */
    memset(&pci_dev->dev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&pci_dev->dev.config[PCI_ROM_ADDRESS], 0, 4);

    snprintf(name, sizeof(name), "%sresource", dir);

    f = fopen(name, "r");
    if (f == NULL) {
        error_report("%s: %s: %m", __func__, name);
        return 1;
    }

    /* Each "resource" line is: start end flags (hex, 0x-prefixed;
     * SCNi64 accepts the 0x prefix). */
    for (r = 0; r < PCI_ROM_SLOT; r++) {
        if (fscanf(f, "%" SCNi64 " %" SCNi64 " %" SCNi64 "\n",
                   &start, &end, &flags) != 3) {
            break;
        }

        rp = dev->regions + r;
        rp->valid = 0;
        rp->resource_fd = -1;
        size = end - start + 1;
        flags &= IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH;
        if (size == 0 || (flags & ~IORESOURCE_PREFETCH) == 0) {
            /* Empty slot or neither I/O nor memory: skip. */
            continue;
        }
        if (flags & IORESOURCE_MEM) {
            flags &= ~IORESOURCE_IO;
        } else {
            flags &= ~IORESOURCE_PREFETCH;
        }
        snprintf(name, sizeof(name), "%sresource%d", dir, r);
        fd = open(name, O_RDWR);
        if (fd == -1) {
            /* Unopenable resources are silently left invalid. */
            continue;
        }
        rp->resource_fd = fd;

        rp->type = flags;
        rp->valid = 1;
        rp->base_addr = start;
        rp->size = size;
        pci_dev->v_addrs[r].region = rp;
        DEBUG("region %d size %" PRIu64 " start 0x%" PRIx64
              " type %d resource_fd %d\n",
              r, rp->size, start, rp->type, rp->resource_fd);
    }

    fclose(f);

    /* read and fill vendor ID */
    v = get_real_vendor_id(dir, &id);
    if (v) {
        return 1;
    }
    pci_dev->dev.config[0] = id & 0xff;
    pci_dev->dev.config[1] = (id & 0xff00) >> 8;

    /* read and fill device ID */
    v = get_real_device_id(dir, &id);
    if (v) {
        return 1;
    }
    pci_dev->dev.config[2] = id & 0xff;
    pci_dev->dev.config[3] = (id & 0xff00) >> 8;

    /* Bus-master and INTx-disable are always forwarded to the device. */
    pci_word_test_and_clear_mask(pci_dev->emulate_config_write + PCI_COMMAND,
                                 PCI_COMMAND_MASTER | PCI_COMMAND_INTX_DISABLE);

    dev->region_number = r;
    return 0;
}
685

    
686
/* Release all KVM MSI routing entries held by the device and reset the
 * bookkeeping to empty. */
static void free_msi_virqs(AssignedDevice *dev)
{
    int n;

    for (n = 0; n < dev->msi_virq_nr; n++) {
        if (dev->msi_virq[n] < 0) {
            continue;   /* slot already released */
        }
        kvm_irqchip_release_virq(kvm_state, dev->msi_virq[n]);
        dev->msi_virq[n] = -1;
    }
    g_free(dev->msi_virq);
    dev->msi_virq = NULL;
    dev->msi_virq_nr = 0;
}
700

    
701
/*
 * Tear down everything built up for the assigned device: the MSI-X
 * MMIO page, every registered BAR (memory regions, mmap, resource fds),
 * the config-space fd, and any MSI routing entries.  Order matters:
 * subregions must be removed before their MemoryRegions are destroyed,
 * and the MSI-X overlay before its containing BAR goes away.
 */
static void free_assigned_device(AssignedDevice *dev)
{
    int i;

    if (dev->cap.available & ASSIGNED_DEVICE_CAP_MSIX) {
        assigned_dev_unregister_msix_mmio(dev);
    }
    for (i = 0; i < dev->real_device.region_number; i++) {
        PCIRegion *pci_region = &dev->real_device.regions[i];
        AssignedDevRegion *region = &dev->v_addrs[i];

        if (!pci_region->valid) {
            continue;
        }
        if (pci_region->type & IORESOURCE_IO) {
            /* r_baseport non-zero implies the region was set up. */
            if (region->u.r_baseport) {
                memory_region_del_subregion(&region->container,
                                            &region->real_iomem);
                memory_region_destroy(&region->real_iomem);
                memory_region_destroy(&region->container);
            }
        } else if (pci_region->type & IORESOURCE_MEM) {
            if (region->u.r_virtbase) {
                memory_region_del_subregion(&region->container,
                                            &region->real_iomem);

                /* Remove MSI-X table subregion */
                if (pci_region->base_addr <= dev->msix_table_addr &&
                    pci_region->base_addr + pci_region->size >
                    dev->msix_table_addr) {
                    memory_region_del_subregion(&region->container,
                                                &dev->mmio);
                }

                memory_region_destroy(&region->real_iomem);
                memory_region_destroy(&region->container);
                /* r_virtbase was advanced by (base_addr & 0xFFF) at map
                 * time; munmap length is rounded up to whole pages. */
                if (munmap(region->u.r_virtbase,
                           (pci_region->size + 0xFFF) & 0xFFFFF000)) {
                    error_report("Failed to unmap assigned device region: %s",
                                 strerror(errno));
                }
            }
        }
        if (pci_region->resource_fd >= 0) {
            close(pci_region->resource_fd);
        }
    }

    if (dev->real_device.config_fd >= 0) {
        close(dev->real_device.config_fd);
    }

    free_msi_virqs(dev);
}
755

    
756
/*
 * After a failed (-EBUSY) assignment, inspect sysfs to find which host
 * driver is bound to the device and print step-by-step instructions for
 * rebinding it to pci-stub.  Purely diagnostic; falls back to a generic
 * message if the driver link or ids cannot be read.
 */
static void assign_failed_examine(AssignedDevice *dev)
{
    char name[PATH_MAX], dir[PATH_MAX], driver[PATH_MAX] = {}, *ns;
    uint16_t vendor_id, device_id;
    int r;

    snprintf(dir, sizeof(dir), "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
            dev->host.domain, dev->host.bus, dev->host.slot,
            dev->host.function);

    snprintf(name, sizeof(name), "%sdriver", dir);

    /* readlink does not NUL-terminate; driver[] is zero-initialized and
     * r < sizeof(driver) is enforced, so the string stays terminated. */
    r = readlink(name, driver, sizeof(driver));
    if ((r <= 0) || r >= sizeof(driver)) {
        goto fail;
    }

    /* The link target ends in ".../<driver-name>"; keep the basename. */
    ns = strrchr(driver, '/');
    if (!ns) {
        goto fail;
    }

    ns++;

    if (get_real_vendor_id(dir, &vendor_id) ||
        get_real_device_id(dir, &device_id)) {
        goto fail;
    }

    error_report("*** The driver '%s' is occupying your device "
                 "%04x:%02x:%02x.%x.",
                 ns, dev->host.domain, dev->host.bus, dev->host.slot,
                 dev->host.function);
    error_report("***");
    error_report("*** You can try the following commands to free it:");
    error_report("***");
    error_report("*** $ echo \"%04x %04x\" > /sys/bus/pci/drivers/pci-stub/"
                 "new_id", vendor_id, device_id);
    error_report("*** $ echo \"%04x:%02x:%02x.%x\" > /sys/bus/pci/drivers/"
                 "%s/unbind",
                 dev->host.domain, dev->host.bus, dev->host.slot,
                 dev->host.function, ns);
    error_report("*** $ echo \"%04x:%02x:%02x.%x\" > /sys/bus/pci/drivers/"
                 "pci-stub/bind",
                 dev->host.domain, dev->host.bus, dev->host.slot,
                 dev->host.function);
    error_report("*** $ echo \"%04x %04x\" > /sys/bus/pci/drivers/pci-stub"
                 "/remove_id", vendor_id, device_id);
    error_report("***");

    return;

fail:
    error_report("Couldn't find out why.");
}
811

    
812
/*
 * Hand the device to KVM's classic assignment interface.  Verifies the
 * kernel can handle a non-zero PCI segment and that an IOMMU is
 * present, then requests the assignment (optionally with PCI 2.3 INTx
 * sharing).  On -EBUSY, prints hints about the occupying host driver.
 * Returns 0 on success or a negative errno.
 */
static int assign_device(AssignedDevice *dev)
{
    uint32_t flags = KVM_DEV_ASSIGN_ENABLE_IOMMU;
    int ret;

    /* Only pass non-zero PCI segment to capable module */
    if (!kvm_check_extension(kvm_state, KVM_CAP_PCI_SEGMENT) &&
        dev->host.domain) {
        error_report("Can't assign device inside non-zero PCI segment "
                     "as this KVM module doesn't support it.");
        return -ENODEV;
    }

    if (!kvm_check_extension(kvm_state, KVM_CAP_IOMMU)) {
        error_report("No IOMMU found.  Unable to assign device \"%s\"",
                     dev->dev.qdev.id);
        return -ENODEV;
    }

    if (dev->features & ASSIGNED_DEVICE_SHARE_INTX_MASK &&
        kvm_has_intx_set_mask()) {
        flags |= KVM_DEV_ASSIGN_PCI_2_3;
    }

    ret = kvm_device_pci_assign(kvm_state, &dev->host, flags, &dev->dev_id);
    if (ret < 0) {
        error_report("Failed to assign device \"%s\" : %s",
                     dev->dev.qdev.id, strerror(-ret));

        if (ret == -EBUSY) {
            /* Likely still bound to a host driver; explain how to free it. */
            assign_failed_examine(dev);
        }
    }
    return ret;
}
851

    
852
/* Device assignment needs KVM's in-kernel irqchip; report and return
 * false when it is absent. */
static bool check_irqchip_in_kernel(void)
{
    if (!kvm_irqchip_in_kernel()) {
        error_report("pci-assign: error: requires KVM with in-kernel irqchip "
                     "enabled");
        return false;
    }
    return true;
}
861

    
862
/*
 * (Re-)establish legacy INTx interrupt forwarding for the assigned
 * device.
 *
 * If the device does not use INTx (interrupt pin 0), the routing
 * notifier is removed and nothing else happens.  Otherwise the current
 * guest-side route for the device's pin is looked up; when it differs
 * from the cached one, any previously assigned interrupt (INTx, MSI or
 * MSI-X) is torn down first and a new host INTx assignment is set up.
 * When the device prefers (or is forced to fall back to) host-side MSI,
 * the host interrupt is delivered via MSI while the guest still sees
 * INTx.
 *
 * Fix vs. original: the fallback warning read "Some devices do not to
 * work properly" — corrected to "do not work properly".
 *
 * Returns 0 on success or a negative errno value.
 */
static int assign_intx(AssignedDevice *dev)
{
    AssignedIRQType new_type;
    PCIINTxRoute intx_route;
    bool intx_host_msi;
    int r;

    /* Interrupt PIN 0 means don't use INTx */
    if (assigned_dev_pci_read_byte(&dev->dev, PCI_INTERRUPT_PIN) == 0) {
        pci_device_set_intx_routing_notifier(&dev->dev, NULL);
        return 0;
    }

    if (!check_irqchip_in_kernel()) {
        return -ENOTSUP;
    }

    /* Re-run this function whenever the guest changes the route. */
    pci_device_set_intx_routing_notifier(&dev->dev,
                                         assigned_dev_update_irq_routing);

    intx_route = pci_device_route_intx_to_irq(&dev->dev, dev->intpin);
    assert(intx_route.mode != PCI_INTX_INVERTED);

    if (!pci_intx_route_changed(&dev->intx_route, &intx_route)) {
        return 0;
    }

    /* Tear down whatever interrupt mode was previously assigned. */
    switch (dev->assigned_irq_type) {
    case ASSIGNED_IRQ_INTX_HOST_INTX:
    case ASSIGNED_IRQ_INTX_HOST_MSI:
        intx_host_msi = dev->assigned_irq_type == ASSIGNED_IRQ_INTX_HOST_MSI;
        r = kvm_device_intx_deassign(kvm_state, dev->dev_id, intx_host_msi);
        break;
    case ASSIGNED_IRQ_MSI:
        r = kvm_device_msi_deassign(kvm_state, dev->dev_id);
        break;
    case ASSIGNED_IRQ_MSIX:
        r = kvm_device_msix_deassign(kvm_state, dev->dev_id);
        break;
    default:
        r = 0;
        break;
    }
    if (r) {
        perror("assign_intx: deassignment of previous interrupt failed");
    }
    dev->assigned_irq_type = ASSIGNED_IRQ_NONE;

    if (intx_route.mode == PCI_INTX_DISABLED) {
        dev->intx_route = intx_route;
        return 0;
    }

retry:
    if (dev->features & ASSIGNED_DEVICE_PREFER_MSI_MASK &&
        dev->cap.available & ASSIGNED_DEVICE_CAP_MSI) {
        intx_host_msi = true;
        new_type = ASSIGNED_IRQ_INTX_HOST_MSI;
    } else {
        intx_host_msi = false;
        new_type = ASSIGNED_IRQ_INTX_HOST_INTX;
    }

    r = kvm_device_intx_assign(kvm_state, dev->dev_id, intx_host_msi,
                               intx_route.irq);
    if (r < 0) {
        if (r == -EIO && !(dev->features & ASSIGNED_DEVICE_PREFER_MSI_MASK) &&
            dev->cap.available & ASSIGNED_DEVICE_CAP_MSI) {
            /* Retry with host-side MSI. There might be an IRQ conflict and
             * either the kernel or the device doesn't support sharing. */
            error_report("Host-side INTx sharing not supported, "
                         "using MSI instead.\n"
                         "Some devices do not work properly in this mode.");
            dev->features |= ASSIGNED_DEVICE_PREFER_MSI_MASK;
            goto retry;
        }
        error_report("Failed to assign irq for \"%s\": %s",
                     dev->dev.qdev.id, strerror(-r));
        error_report("Perhaps you are assigning a device "
                     "that shares an IRQ with another device?");
        return r;
    }

    dev->intx_route = intx_route;
    dev->assigned_irq_type = new_type;
    return r;
}
949

    
950
/*
 * Detach the device from KVM again.  Deassigning a previously assigned
 * device is expected to always succeed, hence the assertion.
 */
static void deassign_device(AssignedDevice *dev)
{
    int ret;

    ret = kvm_device_pci_deassign(kvm_state, dev->dev_id);
    assert(ret == 0);
}
957

    
958
/* The pci config space got updated. Check if irq numbers have changed
959
 * for our devices
960
 */
961
static void assigned_dev_update_irq_routing(PCIDevice *dev)
962
{
963
    AssignedDevice *assigned_dev = DO_UPCAST(AssignedDevice, dev, dev);
964
    Error *err = NULL;
965
    int r;
966

    
967
    r = assign_intx(assigned_dev);
968
    if (r < 0) {
969
        qdev_unplug(&dev->qdev, &err);
970
        assert(!err);
971
    }
972
}
973

    
974
/*
 * Guest write to the MSI capability control byte: bring KVM's interrupt
 * assignment in sync with the new MSI enable state.
 *
 * When the guest enables MSI, a single MSI route (vector 0) is added to
 * the in-kernel irqchip and assigned to the device; the cached INTx
 * route is invalidated.  When the guest disables MSI, the device falls
 * back to INTx via assign_intx().
 */
static void assigned_dev_update_msi(PCIDevice *pci_dev)
{
    AssignedDevice *assigned_dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    uint8_t ctrl_byte = pci_get_byte(pci_dev->config + pci_dev->msi_cap +
                                     PCI_MSI_FLAGS);
    int r;

    /* Some guests gratuitously disable MSI even if they're not using it,
     * try to catch this by only deassigning irqs if the guest is using
     * MSI or intends to start. */
    if (assigned_dev->assigned_irq_type == ASSIGNED_IRQ_MSI ||
        (ctrl_byte & PCI_MSI_FLAGS_ENABLE)) {
        r = kvm_device_msi_deassign(kvm_state, assigned_dev->dev_id);
        /* -ENXIO means no assigned irq */
        if (r && r != -ENXIO) {
            perror("assigned_dev_update_msi: deassign irq");
        }

        free_msi_virqs(assigned_dev);

        assigned_dev->assigned_irq_type = ASSIGNED_IRQ_NONE;
        pci_device_set_intx_routing_notifier(pci_dev, NULL);
    }

    if (ctrl_byte & PCI_MSI_FLAGS_ENABLE) {
        /* MSI being enabled: route the guest-programmed message through
         * the in-kernel irqchip. */
        MSIMessage msg = msi_get_message(pci_dev, 0);
        int virq;

        virq = kvm_irqchip_add_msi_route(kvm_state, msg);
        if (virq < 0) {
            perror("assigned_dev_update_msi: kvm_irqchip_add_msi_route");
            return;
        }

        /* Track the single virq so it can be freed on the next update. */
        assigned_dev->msi_virq = g_malloc(sizeof(*assigned_dev->msi_virq));
        assigned_dev->msi_virq_nr = 1;
        assigned_dev->msi_virq[0] = virq;
        if (kvm_device_msi_assign(kvm_state, assigned_dev->dev_id, virq) < 0) {
            perror("assigned_dev_update_msi: kvm_device_msi_assign");
        }

        /* MSI is active now, so the INTx route no longer applies. */
        assigned_dev->intx_route.mode = PCI_INTX_DISABLED;
        assigned_dev->intx_route.irq = -1;
        assigned_dev->assigned_irq_type = ASSIGNED_IRQ_MSI;
    } else {
        /* MSI turned off: fall back to legacy INTx. */
        assign_intx(assigned_dev);
    }
}
1022

    
1023
/* True when the per-vector mask bit (bit 0 of the vector control word,
 * stored little-endian in the shadow table) is set. */
static bool assigned_dev_msix_masked(MSIXTableEntry *entry)
{
    return (le32_to_cpu(entry->ctrl) & 0x1) != 0;
}
1027

    
1028
/*
 * Program KVM's MSI-X vector table from the shadow table.
 *
 * First pass counts the unmasked entries; if all vectors are masked the
 * function succeeds without touching KVM (enabling MSI-X with every
 * entry masked is legal).  Otherwise the KVM vector table is
 * (re-)initialized and a second pass adds an MSI route for each
 * unmasked entry and binds it to the corresponding device vector.
 * adev->msi_virq[] records one virq per table entry, -1 for masked
 * ones, so later updates/teardown can find them.
 *
 * Returns 0 on success or a negative errno value.
 */
static int assigned_dev_update_msix_mmio(PCIDevice *pci_dev)
{
    AssignedDevice *adev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    uint16_t entries_nr = 0;
    int i, r = 0;
    MSIXTableEntry *entry = adev->msix_table;
    MSIMessage msg;

    /* Get the usable entry number for allocating */
    for (i = 0; i < adev->msix_max; i++, entry++) {
        if (assigned_dev_msix_masked(entry)) {
            continue;
        }
        entries_nr++;
    }

    DEBUG("MSI-X entries: %d\n", entries_nr);

    /* It's valid to enable MSI-X with all entries masked */
    if (!entries_nr) {
        return 0;
    }

    r = kvm_device_msix_init_vectors(kvm_state, adev->dev_id, entries_nr);
    if (r != 0) {
        error_report("fail to set MSI-X entry number for MSIX! %s",
                     strerror(-r));
        return r;
    }

    /* Drop any virqs from a previous MSI/MSI-X configuration. */
    free_msi_virqs(adev);

    adev->msi_virq_nr = adev->msix_max;
    adev->msi_virq = g_malloc(adev->msix_max * sizeof(*adev->msi_virq));

    entry = adev->msix_table;
    for (i = 0; i < adev->msix_max; i++, entry++) {
        /* -1 marks entries without an assigned virq (masked vectors). */
        adev->msi_virq[i] = -1;

        if (assigned_dev_msix_masked(entry)) {
            continue;
        }

        /* Build the MSI message from the guest-programmed table entry. */
        msg.address = entry->addr_lo | ((uint64_t)entry->addr_hi << 32);
        msg.data = entry->data;
        r = kvm_irqchip_add_msi_route(kvm_state, msg);
        if (r < 0) {
            return r;
        }
        adev->msi_virq[i] = r;

        DEBUG("MSI-X vector %d, gsi %d, addr %08x_%08x, data %08x\n", i,
              r, entry->addr_hi, entry->addr_lo, entry->data);

        r = kvm_device_msix_set_vector(kvm_state, adev->dev_id, i,
                                       adev->msi_virq[i]);
        if (r) {
            error_report("fail to set MSI-X entry! %s", strerror(-r));
            break;
        }
    }

    return r;
}
1092

    
1093
/*
 * Guest write to the MSI-X capability control word: bring KVM's
 * interrupt assignment in sync with the new MSI-X enable state.
 *
 * On enable, the shadow table is programmed into KVM via
 * assigned_dev_update_msix_mmio() and the device is switched to MSI-X
 * delivery; on disable, the device falls back to INTx.
 */
static void assigned_dev_update_msix(PCIDevice *pci_dev)
{
    AssignedDevice *assigned_dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    uint16_t ctrl_word = pci_get_word(pci_dev->config + pci_dev->msix_cap +
                                      PCI_MSIX_FLAGS);
    int r;

    /* Some guests gratuitously disable MSIX even if they're not using it,
     * try to catch this by only deassigning irqs if the guest is using
     * MSIX or intends to start. */
    if ((assigned_dev->assigned_irq_type == ASSIGNED_IRQ_MSIX) ||
        (ctrl_word & PCI_MSIX_FLAGS_ENABLE)) {
        r = kvm_device_msix_deassign(kvm_state, assigned_dev->dev_id);
        /* -ENXIO means no assigned irq */
        if (r && r != -ENXIO) {
            perror("assigned_dev_update_msix: deassign irq");
        }

        free_msi_virqs(assigned_dev);

        assigned_dev->assigned_irq_type = ASSIGNED_IRQ_NONE;
        pci_device_set_intx_routing_notifier(pci_dev, NULL);
    }

    if (ctrl_word & PCI_MSIX_FLAGS_ENABLE) {
        if (assigned_dev_update_msix_mmio(pci_dev) < 0) {
            perror("assigned_dev_update_msix_mmio");
            return;
        }

        /* msi_virq_nr == 0 means all vectors are masked; KVM assignment
         * is deferred until a vector gets unmasked. */
        if (assigned_dev->msi_virq_nr > 0) {
            if (kvm_device_msix_assign(kvm_state, assigned_dev->dev_id) < 0) {
                perror("assigned_dev_enable_msix: assign irq");
                return;
            }
        }
        /* MSI-X is active now, so the INTx route no longer applies. */
        assigned_dev->intx_route.mode = PCI_INTX_DISABLED;
        assigned_dev->intx_route.irq = -1;
        assigned_dev->assigned_irq_type = ASSIGNED_IRQ_MSIX;
    } else {
        /* MSI-X turned off: fall back to legacy INTx. */
        assign_intx(assigned_dev);
    }
}
1136

    
1137
/*
 * Config-space read handler: merge emulated bytes (QEMU's shadow
 * config) with bytes read from the physical device, as selected by the
 * per-byte emulate_config_read bitmap.
 */
static uint32_t assigned_dev_pci_read_config(PCIDevice *pci_dev,
                                             uint32_t address, int len)
{
    AssignedDevice *adev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    uint32_t emulated = pci_default_read_config(pci_dev, address, len);
    uint32_t mask = 0;
    uint32_t all_emulated;

    /* Build the per-byte emulation mask for the accessed range. */
    memcpy(&mask, adev->emulate_config_read + address, len);
    mask = le32_to_cpu(mask);

    all_emulated = 0xffffffff >> (32 - len * 8);

    if (mask == all_emulated) {
        /* Every byte is emulated; no hardware access needed. */
        return emulated;
    }

    /* At least one byte comes straight from the device. */
    return (emulated & mask) |
           (assigned_dev_pci_read(pci_dev, address, len) & ~mask);
}
1157

    
1158
/*
 * Config-space write handler.
 *
 * The write is first applied to QEMU's shadow config, then side effects
 * are processed: INTx mask changes are forwarded to KVM (when the
 * kernel supports KVM_DEV_ASSIGN_PCI_2_3), and writes touching the
 * MSI/MSI-X enable bits trigger the respective update functions.
 * Finally, bytes not marked as emulated in emulate_config_write are
 * propagated to the physical device, with emulated bytes replaced by
 * their current hardware values so the device write stays consistent.
 */
static void assigned_dev_pci_write_config(PCIDevice *pci_dev, uint32_t address,
                                          uint32_t val, int len)
{
    AssignedDevice *assigned_dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    uint16_t old_cmd = pci_get_word(pci_dev->config + PCI_COMMAND);
    uint32_t emulate_mask, full_emulation_mask;
    int ret;

    pci_default_write_config(pci_dev, address, val, len);

    /* Forward INTx disable-bit toggles to KVM's mask support. */
    if (kvm_has_intx_set_mask() &&
        range_covers_byte(address, len, PCI_COMMAND + 1)) {
        bool intx_masked = (pci_get_word(pci_dev->config + PCI_COMMAND) &
                            PCI_COMMAND_INTX_DISABLE);

        if (intx_masked != !!(old_cmd & PCI_COMMAND_INTX_DISABLE)) {
            ret = kvm_device_intx_set_mask(kvm_state, assigned_dev->dev_id,
                                           intx_masked);
            if (ret) {
                perror("assigned_dev_pci_write_config: set intx mask");
            }
        }
    }
    /* Writes to the MSI/MSI-X control fields may flip the enable bit. */
    if (assigned_dev->cap.available & ASSIGNED_DEVICE_CAP_MSI) {
        if (range_covers_byte(address, len,
                              pci_dev->msi_cap + PCI_MSI_FLAGS)) {
            assigned_dev_update_msi(pci_dev);
        }
    }
    if (assigned_dev->cap.available & ASSIGNED_DEVICE_CAP_MSIX) {
        if (range_covers_byte(address, len,
                              pci_dev->msix_cap + PCI_MSIX_FLAGS + 1)) {
            assigned_dev_update_msix(pci_dev);
        }
    }

    /* Build the per-byte emulation mask for the written range. */
    emulate_mask = 0;
    memcpy(&emulate_mask, assigned_dev->emulate_config_write + address, len);
    emulate_mask = le32_to_cpu(emulate_mask);

    full_emulation_mask = 0xffffffff >> (32 - len * 8);

    if (emulate_mask != full_emulation_mask) {
        if (emulate_mask) {
            /* Replace emulated bytes with the device's current values
             * before passing the write through to hardware. */
            val &= ~emulate_mask;
            val |= assigned_dev_pci_read(pci_dev, address, len) & emulate_mask;
        }
        assigned_dev_pci_write(pci_dev, address, val, len);
    }
}
1208

    
1209
/*
 * Configure config-space read handling for a capability at @offset
 * spanning @len bytes: reads of the capability body go straight to the
 * physical device, while the next-capability pointer byte is marked for
 * emulation afterwards, since QEMU rebuilds the capability chain.
 * NOTE(review): the two calls appear order-sensitive — the second one
 * overrides the next-pointer byte within the range covered by the
 * first; keep them in this order.
 */
static void assigned_dev_setup_cap_read(AssignedDevice *dev, uint32_t offset,
                                        uint32_t len)
{
    assigned_dev_direct_config_read(dev, offset, len);
    assigned_dev_emulate_config_read(dev, offset + PCI_CAP_LIST_NEXT, 1);
}
1215

    
1216
/*
 * Rebuild the guest-visible PCI capability list for the assigned
 * device.
 *
 * The hardware capability chain copied from the host is discarded and a
 * sanitized chain is constructed in its place.  Capabilities handled:
 *  - MSI (emulated, 32-bit/no-mask only, gated on KVM irq assignment)
 *  - MSI-X (emulated; table location and size are extracted here)
 *  - PM (read-only shell; writes appear to be NAKed by the device)
 *  - PCIe (endpoint types only; error reporting, FLR and slot/root
 *    registers are hidden or cleared)
 *  - PCI-X (minimal 8-byte view with emulated bus/devfn in the status)
 *  - VPD and vendor-specific capabilities (direct passthrough)
 *
 * Returns 0 on success or a negative errno value.
 */
static int assigned_device_pci_cap_init(PCIDevice *pci_dev)
{
    AssignedDevice *dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    PCIRegion *pci_region = dev->real_device.regions;
    int ret, pos;

    /* Clear initial capabilities pointer and status copied from hw */
    pci_set_byte(pci_dev->config + PCI_CAPABILITY_LIST, 0);
    pci_set_word(pci_dev->config + PCI_STATUS,
                 pci_get_word(pci_dev->config + PCI_STATUS) &
                 ~PCI_STATUS_CAP_LIST);

    /* Expose MSI capability
     * MSI capability is the 1st capability in capability config */
    pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_MSI, 0);
    if (pos != 0 && kvm_check_extension(kvm_state, KVM_CAP_ASSIGN_DEV_IRQ)) {
        if (!check_irqchip_in_kernel()) {
            return -ENOTSUP;
        }
        dev->cap.available |= ASSIGNED_DEVICE_CAP_MSI;
        /* Only 32-bit/no-mask currently supported */
        ret = pci_add_capability(pci_dev, PCI_CAP_ID_MSI, pos, 10);
        if (ret < 0) {
            return ret;
        }
        pci_dev->msi_cap = pos;

        /* Keep only the queue-size field; address/data start out zero. */
        pci_set_word(pci_dev->config + pos + PCI_MSI_FLAGS,
                     pci_get_word(pci_dev->config + pos + PCI_MSI_FLAGS) &
                     PCI_MSI_FLAGS_QMASK);
        pci_set_long(pci_dev->config + pos + PCI_MSI_ADDRESS_LO, 0);
        pci_set_word(pci_dev->config + pos + PCI_MSI_DATA_32, 0);

        /* Set writable fields */
        pci_set_word(pci_dev->wmask + pos + PCI_MSI_FLAGS,
                     PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE);
        pci_set_long(pci_dev->wmask + pos + PCI_MSI_ADDRESS_LO, 0xfffffffc);
        pci_set_word(pci_dev->wmask + pos + PCI_MSI_DATA_32, 0xffff);
    }
    /* Expose MSI-X capability */
    pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_MSIX, 0);
    if (pos != 0 && kvm_device_msix_supported(kvm_state)) {
        int bar_nr;
        uint32_t msix_table_entry;

        if (!check_irqchip_in_kernel()) {
            return -ENOTSUP;
        }
        dev->cap.available |= ASSIGNED_DEVICE_CAP_MSIX;
        ret = pci_add_capability(pci_dev, PCI_CAP_ID_MSIX, pos, 12);
        if (ret < 0) {
            return ret;
        }
        pci_dev->msix_cap = pos;

        pci_set_word(pci_dev->config + pos + PCI_MSIX_FLAGS,
                     pci_get_word(pci_dev->config + pos + PCI_MSIX_FLAGS) &
                     PCI_MSIX_FLAGS_QSIZE);

        /* Only enable and function mask bits are writable */
        pci_set_word(pci_dev->wmask + pos + PCI_MSIX_FLAGS,
                     PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);

        /* Locate the physical MSI-X table: BAR index + offset, and
         * compute the vector count (QSIZE field is N-1 encoded). */
        msix_table_entry = pci_get_long(pci_dev->config + pos + PCI_MSIX_TABLE);
        bar_nr = msix_table_entry & PCI_MSIX_FLAGS_BIRMASK;
        msix_table_entry &= ~PCI_MSIX_FLAGS_BIRMASK;
        dev->msix_table_addr = pci_region[bar_nr].base_addr + msix_table_entry;
        dev->msix_max = pci_get_word(pci_dev->config + pos + PCI_MSIX_FLAGS);
        dev->msix_max &= PCI_MSIX_FLAGS_QSIZE;
        dev->msix_max += 1;
    }

    /* Minimal PM support, nothing writable, device appears to NAK changes */
    pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_PM, 0);
    if (pos) {
        uint16_t pmc;

        ret = pci_add_capability(pci_dev, PCI_CAP_ID_PM, pos, PCI_PM_SIZEOF);
        if (ret < 0) {
            return ret;
        }

        assigned_dev_setup_cap_read(dev, pos, PCI_PM_SIZEOF);

        pmc = pci_get_word(pci_dev->config + pos + PCI_CAP_FLAGS);
        pmc &= (PCI_PM_CAP_VER_MASK | PCI_PM_CAP_DSI);
        pci_set_word(pci_dev->config + pos + PCI_CAP_FLAGS, pmc);

        /* assign_device will bring the device up to D0, so we don't need
         * to worry about doing that ourselves here. */
        pci_set_word(pci_dev->config + pos + PCI_PM_CTRL,
                     PCI_PM_CTRL_NO_SOFT_RESET);

        pci_set_byte(pci_dev->config + pos + PCI_PM_PPB_EXTENSIONS, 0);
        pci_set_byte(pci_dev->config + pos + PCI_PM_DATA_REGISTER, 0);
    }

    pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_EXP, 0);
    if (pos) {
        uint8_t version, size = 0;
        uint16_t type, devctl, lnksta;
        uint32_t devcap, lnkcap;

        /* The PCIe capability structure size depends on its version. */
        version = pci_get_byte(pci_dev->config + pos + PCI_EXP_FLAGS);
        version &= PCI_EXP_FLAGS_VERS;
        if (version == 1) {
            size = 0x14;
        } else if (version == 2) {
            /*
             * Check for non-std size, accept reduced size to 0x34,
             * which is what bcm5761 implemented, violating the
             * PCIe v3.0 spec that regs should exist and be read as 0,
             * not optionally provided and shorten the struct size.
             */
            size = MIN(0x3c, PCI_CONFIG_SPACE_SIZE - pos);
            if (size < 0x34) {
                error_report("%s: Invalid size PCIe cap-id 0x%x",
                             __func__, PCI_CAP_ID_EXP);
                return -EINVAL;
            } else if (size != 0x3c) {
                error_report("WARNING, %s: PCIe cap-id 0x%x has "
                             "non-standard size 0x%x; std size should be 0x3c",
                             __func__, PCI_CAP_ID_EXP, size);
            }
        } else if (version == 0) {
            uint16_t vid, did;
            vid = pci_get_word(pci_dev->config + PCI_VENDOR_ID);
            did = pci_get_word(pci_dev->config + PCI_DEVICE_ID);
            if (vid == PCI_VENDOR_ID_INTEL && did == 0x10ed) {
                /*
                 * quirk for Intel 82599 VF with invalid PCIe capability
                 * version, should really be version 2 (same as PF)
                 */
                size = 0x3c;
            }
        }

        if (size == 0) {
            error_report("%s: Unsupported PCI express capability version %d",
                         __func__, version);
            return -EINVAL;
        }

        ret = pci_add_capability(pci_dev, PCI_CAP_ID_EXP, pos, size);
        if (ret < 0) {
            return ret;
        }

        assigned_dev_setup_cap_read(dev, pos, size);

        /* Only endpoint device types may be assigned. */
        type = pci_get_word(pci_dev->config + pos + PCI_EXP_FLAGS);
        type = (type & PCI_EXP_FLAGS_TYPE) >> 4;
        if (type != PCI_EXP_TYPE_ENDPOINT &&
            type != PCI_EXP_TYPE_LEG_END && type != PCI_EXP_TYPE_RC_END) {
            error_report("Device assignment only supports endpoint assignment,"
                         " device type %d", type);
            return -EINVAL;
        }

        /* capabilities, pass existing read-only copy
         * PCI_EXP_FLAGS_IRQ: updated by hardware, should be direct read */

        /* device capabilities: hide FLR */
        devcap = pci_get_long(pci_dev->config + pos + PCI_EXP_DEVCAP);
        devcap &= ~PCI_EXP_DEVCAP_FLR;
        pci_set_long(pci_dev->config + pos + PCI_EXP_DEVCAP, devcap);

        /* device control: clear all error reporting enable bits, leaving
         *                 only a few host values.  Note, these are
         *                 all writable, but not passed to hw.
         */
        devctl = pci_get_word(pci_dev->config + pos + PCI_EXP_DEVCTL);
        devctl = (devctl & (PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_PAYLOAD)) |
                  PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
        pci_set_word(pci_dev->config + pos + PCI_EXP_DEVCTL, devctl);
        devctl = PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_AUX_PME;
        pci_set_word(pci_dev->wmask + pos + PCI_EXP_DEVCTL, ~devctl);

        /* Clear device status */
        pci_set_word(pci_dev->config + pos + PCI_EXP_DEVSTA, 0);

        /* Link capabilities, expose links and latencues, clear reporting */
        lnkcap = pci_get_long(pci_dev->config + pos + PCI_EXP_LNKCAP);
        lnkcap &= (PCI_EXP_LNKCAP_SLS | PCI_EXP_LNKCAP_MLW |
                   PCI_EXP_LNKCAP_ASPMS | PCI_EXP_LNKCAP_L0SEL |
                   PCI_EXP_LNKCAP_L1EL);
        pci_set_long(pci_dev->config + pos + PCI_EXP_LNKCAP, lnkcap);

        /* Link control, pass existing read-only copy.  Should be writable? */

        /* Link status, only expose current speed and width */
        lnksta = pci_get_word(pci_dev->config + pos + PCI_EXP_LNKSTA);
        lnksta &= (PCI_EXP_LNKSTA_CLS | PCI_EXP_LNKSTA_NLW);
        pci_set_word(pci_dev->config + pos + PCI_EXP_LNKSTA, lnksta);

        if (version >= 2) {
            /* Slot capabilities, control, status - not needed for endpoints */
            pci_set_long(pci_dev->config + pos + PCI_EXP_SLTCAP, 0);
            pci_set_word(pci_dev->config + pos + PCI_EXP_SLTCTL, 0);
            pci_set_word(pci_dev->config + pos + PCI_EXP_SLTSTA, 0);

            /* Root control, capabilities, status - not needed for endpoints */
            pci_set_word(pci_dev->config + pos + PCI_EXP_RTCTL, 0);
            pci_set_word(pci_dev->config + pos + PCI_EXP_RTCAP, 0);
            pci_set_long(pci_dev->config + pos + PCI_EXP_RTSTA, 0);

            /* Device capabilities/control 2, pass existing read-only copy */
            /* Link control 2, pass existing read-only copy */
        }
    }

    pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_PCIX, 0);
    if (pos) {
        uint16_t cmd;
        uint32_t status;

        /* Only expose the minimum, 8 byte capability */
        ret = pci_add_capability(pci_dev, PCI_CAP_ID_PCIX, pos, 8);
        if (ret < 0) {
            return ret;
        }

        assigned_dev_setup_cap_read(dev, pos, 8);

        /* Command register, clear upper bits, including extended modes */
        cmd = pci_get_word(pci_dev->config + pos + PCI_X_CMD);
        cmd &= (PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO | PCI_X_CMD_MAX_READ |
                PCI_X_CMD_MAX_SPLIT);
        pci_set_word(pci_dev->config + pos + PCI_X_CMD, cmd);

        /* Status register, update with emulated PCI bus location, clear
         * error bits, leave the rest. */
        status = pci_get_long(pci_dev->config + pos + PCI_X_STATUS);
        status &= ~(PCI_X_STATUS_BUS | PCI_X_STATUS_DEVFN);
        status |= (pci_bus_num(pci_dev->bus) << 8) | pci_dev->devfn;
        status &= ~(PCI_X_STATUS_SPL_DISC | PCI_X_STATUS_UNX_SPL |
                    PCI_X_STATUS_SPL_ERR);
        pci_set_long(pci_dev->config + pos + PCI_X_STATUS, status);
    }

    pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_VPD, 0);
    if (pos) {
        /* Direct R/W passthrough */
        ret = pci_add_capability(pci_dev, PCI_CAP_ID_VPD, pos, 8);
        if (ret < 0) {
            return ret;
        }

        assigned_dev_setup_cap_read(dev, pos, 8);

        /* direct write for cap content */
        assigned_dev_direct_config_write(dev, pos + 2, 6);
    }

    /* Devices can have multiple vendor capabilities, get them all */
    for (pos = 0; (pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_VNDR, pos));
        pos += PCI_CAP_LIST_NEXT) {
        uint8_t len = pci_get_byte(pci_dev->config + pos + PCI_CAP_FLAGS);
        /* Direct R/W passthrough */
        ret = pci_add_capability(pci_dev, PCI_CAP_ID_VNDR, pos, len);
        if (ret < 0) {
            return ret;
        }

        assigned_dev_setup_cap_read(dev, pos, len);

        /* direct write for cap content */
        assigned_dev_direct_config_write(dev, pos + 2, len - 2);
    }

    /* If real and virtual capability list status bits differ, virtualize the
     * access. */
    if ((pci_get_word(pci_dev->config + PCI_STATUS) & PCI_STATUS_CAP_LIST) !=
        (assigned_dev_pci_read_byte(pci_dev, PCI_STATUS) &
         PCI_STATUS_CAP_LIST)) {
        dev->emulate_config_read[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
    }

    return 0;
}
1496

    
1497
static uint64_t
1498
assigned_dev_msix_mmio_read(void *opaque, hwaddr addr,
1499
                            unsigned size)
1500
{
1501
    AssignedDevice *adev = opaque;
1502
    uint64_t val;
1503

    
1504
    memcpy(&val, (void *)((uint8_t *)adev->msix_table + addr), size);
1505

    
1506
    return val;
1507
}
1508

    
1509
/*
 * MMIO write handler for the emulated MSI-X table.
 *
 * The write is stored in the shadow table.  While MSI-X is enabled, the
 * handler additionally reacts to mask-bit transitions of the affected
 * vector: unmasking a vector that never had a virq triggers a full
 * reconfiguration via assigned_dev_update_msix(); unmasking a
 * previously configured vector just updates its existing irq route.
 * Masking is deliberately not propagated (see the XXX note below).
 */
static void assigned_dev_msix_mmio_write(void *opaque, hwaddr addr,
                                         uint64_t val, unsigned size)
{
    AssignedDevice *adev = opaque;
    PCIDevice *pdev = &adev->dev;
    uint16_t ctrl;
    MSIXTableEntry orig;
    int i = addr >> 4;  /* table entries are 16 bytes each */

    if (i >= adev->msix_max) {
        return; /* Drop write */
    }

    ctrl = pci_get_word(pdev->config + pdev->msix_cap + PCI_MSIX_FLAGS);

    DEBUG("write to MSI-X table offset 0x%lx, val 0x%lx\n", addr, val);

    /* Snapshot the entry so mask transitions can be detected below. */
    if (ctrl & PCI_MSIX_FLAGS_ENABLE) {
        orig = adev->msix_table[i];
    }

    memcpy((uint8_t *)adev->msix_table + addr, &val, size);

    if (ctrl & PCI_MSIX_FLAGS_ENABLE) {
        MSIXTableEntry *entry = &adev->msix_table[i];

        if (!assigned_dev_msix_masked(&orig) &&
            assigned_dev_msix_masked(entry)) {
            /*
             * Vector masked, disable it
             *
             * XXX It's not clear if we can or should actually attempt
             * to mask or disable the interrupt.  KVM doesn't have
             * support for pending bits and kvm_assign_set_msix_entry
             * doesn't modify the device hardware mask.  Interrupts
             * while masked are simply not injected to the guest, so
             * are lost.  Can we get away with always injecting an
             * interrupt on unmask?
             */
        } else if (assigned_dev_msix_masked(&orig) &&
                   !assigned_dev_msix_masked(entry)) {
            /* Vector unmasked */
            if (i >= adev->msi_virq_nr || adev->msi_virq[i] < 0) {
                /* Previously unassigned vector, start from scratch */
                assigned_dev_update_msix(pdev);
                return;
            } else {
                /* Update an existing, previously masked vector */
                MSIMessage msg;
                int ret;

                msg.address = entry->addr_lo |
                    ((uint64_t)entry->addr_hi << 32);
                msg.data = entry->data;

                ret = kvm_irqchip_update_msi_route(kvm_state,
                                                   adev->msi_virq[i], msg);
                if (ret) {
                    error_report("Error updating irq routing entry (%d)", ret);
                }
            }
        }
    }
}
1573

    
1574
/* MMIO ops for the shadow MSI-X table region; only 4- and 8-byte
 * accesses are accepted and implemented. */
static const MemoryRegionOps assigned_dev_msix_mmio_ops = {
    .read = assigned_dev_msix_mmio_read,
    .write = assigned_dev_msix_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
1587

    
1588
/*
 * Reset the shadow MSI-X table: zero the whole page and then set the
 * per-vector mask bit on every entry.  A no-op when no table has been
 * allocated.
 */
static void assigned_dev_msix_reset(AssignedDevice *dev)
{
    int i;

    if (!dev->msix_table) {
        return;
    }

    memset(dev->msix_table, 0, MSIX_PAGE_SIZE);

    for (i = 0; i < dev->msix_max; i++) {
        dev->msix_table[i].ctrl = cpu_to_le32(0x1); /* Masked */
    }
}
1603

    
1604
/*
 * Allocate the page-sized shadow MSI-X table and register the MMIO
 * region that intercepts guest accesses to it.
 *
 * Fix vs. original: POSIX requires fd == -1 for MAP_ANONYMOUS mappings;
 * the previous code passed fd 0, which only happens to work on Linux.
 *
 * Returns 0 on success, -EFAULT when the mapping fails.
 */
static int assigned_dev_register_msix_mmio(AssignedDevice *dev)
{
    dev->msix_table = mmap(NULL, MSIX_PAGE_SIZE, PROT_READ|PROT_WRITE,
                           MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    if (dev->msix_table == MAP_FAILED) {
        error_report("fail allocate msix_table! %s", strerror(errno));
        return -EFAULT;
    }

    /* Start out with all vectors masked, as after hardware reset. */
    assigned_dev_msix_reset(dev);

    memory_region_init_io(&dev->mmio, &assigned_dev_msix_mmio_ops, dev,
                          "assigned-dev-msix", MSIX_PAGE_SIZE);
    return 0;
}
1619

    
1620
/*
 * Tear down the shadow MSI-X table: destroy the MMIO region and unmap
 * the backing page.  Safe to call when no table was ever allocated.
 */
static void assigned_dev_unregister_msix_mmio(AssignedDevice *dev)
{
    if (!dev->msix_table) {
        return;
    }

    memory_region_destroy(&dev->mmio);

    if (munmap(dev->msix_table, MSIX_PAGE_SIZE) < 0) {
        error_report("error unmapping msix_table! %s", strerror(errno));
    }
    dev->msix_table = NULL;
}
1633

    
1634
/* Assigned physical devices cannot be migrated; mark the vmstate
 * unmigratable so migration is blocked while one is present. */
static const VMStateDescription vmstate_assigned_device = {
    .name = "pci-assign",
    .unmigratable = 1,
};
1638

    
1639
/*
 * qdev reset handler: quiesce guest-programmed MSI/MSI-X state, request a
 * function reset through pci-sysfs, and finally clear bus mastering so
 * the device cannot keep DMAing into a guest that is being reset.
 */
static void reset_assigned_device(DeviceState *dev)
{
    PCIDevice *pci_dev = DO_UPCAST(PCIDevice, qdev, dev);
    AssignedDevice *adev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    char reset_path[64];
    const char reset_cmd[] = "1";
    int fd, ret;

    /*
     * If a guest is reset without being shutdown, MSI/MSI-X can still
     * be running.  We want to return the device to a known state on
     * reset, so disable those here.  We especially do not want MSI-X
     * enabled since it lives in MMIO space, which is about to get
     * disabled.
     */
    if (adev->assigned_irq_type == ASSIGNED_IRQ_MSIX) {
        uint16_t ctrl = pci_get_word(pci_dev->config +
                                     pci_dev->msix_cap + PCI_MSIX_FLAGS);

        pci_set_word(pci_dev->config + pci_dev->msix_cap + PCI_MSIX_FLAGS,
                     ctrl & ~PCI_MSIX_FLAGS_ENABLE);
        assigned_dev_update_msix(pci_dev);
    } else if (adev->assigned_irq_type == ASSIGNED_IRQ_MSI) {
        uint8_t ctrl = pci_get_byte(pci_dev->config +
                                    pci_dev->msi_cap + PCI_MSI_FLAGS);

        pci_set_byte(pci_dev->config + pci_dev->msi_cap + PCI_MSI_FLAGS,
                     ctrl & ~PCI_MSI_FLAGS_ENABLE);
        assigned_dev_update_msi(pci_dev);
    }

    snprintf(reset_path, sizeof(reset_path),
             "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/reset",
             adev->host.domain, adev->host.bus, adev->host.slot,
             adev->host.function);

    /*
     * Issue a device reset via pci-sysfs.  Note that we use write(2) here
     * and ignore the return value because some kernels have a bug that
     * returns 0 rather than bytes written on success, sending us into an
     * infinite retry loop using other write mechanisms.
     */
    fd = open(reset_path, O_WRONLY);
    if (fd >= 0) {
        ret = write(fd, reset_cmd, strlen(reset_cmd));
        (void)ret;
        close(fd);
    }

    /*
     * When a 0 is written to the bus master register, the device is logically
     * disconnected from the PCI bus. This avoids further DMA transfers.
     */
    assigned_dev_pci_write_config(pci_dev, PCI_COMMAND, 0, 1);
}

static int assigned_initfn(struct PCIDevice *pci_dev)
1696
{
1697
    AssignedDevice *dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
1698
    uint8_t e_intx;
1699
    int r;
1700

    
1701
    if (!kvm_enabled()) {
1702
        error_report("pci-assign: error: requires KVM support");
1703
        return -1;
1704
    }
1705

    
1706
    if (!dev->host.domain && !dev->host.bus && !dev->host.slot &&
1707
        !dev->host.function) {
1708
        error_report("pci-assign: error: no host device specified");
1709
        return -1;
1710
    }
1711

    
1712
    /*
1713
     * Set up basic config space access control. Will be further refined during
1714
     * device initialization.
1715
     */
1716
    assigned_dev_emulate_config_read(dev, 0, PCI_CONFIG_SPACE_SIZE);
1717
    assigned_dev_direct_config_read(dev, PCI_STATUS, 2);
1718
    assigned_dev_direct_config_read(dev, PCI_REVISION_ID, 1);
1719
    assigned_dev_direct_config_read(dev, PCI_CLASS_PROG, 3);
1720
    assigned_dev_direct_config_read(dev, PCI_CACHE_LINE_SIZE, 1);
1721
    assigned_dev_direct_config_read(dev, PCI_LATENCY_TIMER, 1);
1722
    assigned_dev_direct_config_read(dev, PCI_BIST, 1);
1723
    assigned_dev_direct_config_read(dev, PCI_CARDBUS_CIS, 4);
1724
    assigned_dev_direct_config_read(dev, PCI_SUBSYSTEM_VENDOR_ID, 2);
1725
    assigned_dev_direct_config_read(dev, PCI_SUBSYSTEM_ID, 2);
1726
    assigned_dev_direct_config_read(dev, PCI_CAPABILITY_LIST + 1, 7);
1727
    assigned_dev_direct_config_read(dev, PCI_MIN_GNT, 1);
1728
    assigned_dev_direct_config_read(dev, PCI_MAX_LAT, 1);
1729
    memcpy(dev->emulate_config_write, dev->emulate_config_read,
1730
           sizeof(dev->emulate_config_read));
1731

    
1732
    if (get_real_device(dev, dev->host.domain, dev->host.bus,
1733
                        dev->host.slot, dev->host.function)) {
1734
        error_report("pci-assign: Error: Couldn't get real device (%s)!",
1735
                     dev->dev.qdev.id);
1736
        goto out;
1737
    }
1738

    
1739
    if (assigned_device_pci_cap_init(pci_dev) < 0) {
1740
        goto out;
1741
    }
1742

    
1743
    /* intercept MSI-X entry page in the MMIO */
1744
    if (dev->cap.available & ASSIGNED_DEVICE_CAP_MSIX) {
1745
        if (assigned_dev_register_msix_mmio(dev)) {
1746
            goto out;
1747
        }
1748
    }
1749

    
1750
    /* handle real device's MMIO/PIO BARs */
1751
    if (assigned_dev_register_regions(dev->real_device.regions,
1752
                                      dev->real_device.region_number,
1753
                                      dev)) {
1754
        goto out;
1755
    }
1756

    
1757
    /* handle interrupt routing */
1758
    e_intx = dev->dev.config[PCI_INTERRUPT_PIN] - 1;
1759
    dev->intpin = e_intx;
1760
    dev->intx_route.mode = PCI_INTX_DISABLED;
1761
    dev->intx_route.irq = -1;
1762

    
1763
    /* assign device to guest */
1764
    r = assign_device(dev);
1765
    if (r < 0) {
1766
        goto out;
1767
    }
1768

    
1769
    /* assign legacy INTx to the device */
1770
    r = assign_intx(dev);
1771
    if (r < 0) {
1772
        goto assigned_out;
1773
    }
1774

    
1775
    assigned_dev_load_option_rom(dev);
1776

    
1777
    add_boot_device_path(dev->bootindex, &pci_dev->qdev, NULL);
1778

    
1779
    return 0;
1780

    
1781
assigned_out:
1782
    deassign_device(dev);
1783
out:
1784
    free_assigned_device(dev);
1785
    return -1;
1786
}
1787

    
1788
static void assigned_exitfn(struct PCIDevice *pci_dev)
1789
{
1790
    AssignedDevice *dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
1791

    
1792
    deassign_device(dev);
1793
    free_assigned_device(dev);
1794
}
1795

    
1796
static Property assigned_dev_properties[] = {
1797
    DEFINE_PROP_PCI_HOST_DEVADDR("host", AssignedDevice, host),
1798
    DEFINE_PROP_BIT("prefer_msi", AssignedDevice, features,
1799
                    ASSIGNED_DEVICE_PREFER_MSI_BIT, false),
1800
    DEFINE_PROP_BIT("share_intx", AssignedDevice, features,
1801
                    ASSIGNED_DEVICE_SHARE_INTX_BIT, true),
1802
    DEFINE_PROP_INT32("bootindex", AssignedDevice, bootindex, -1),
1803
    DEFINE_PROP_STRING("configfd", AssignedDevice, configfd_name),
1804
    DEFINE_PROP_END_OF_LIST(),
1805
};
1806

    
1807
/* QOM class initializer: wire up the PCI and generic device callbacks. */
static void assign_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init         = assigned_initfn;
    k->exit         = assigned_exitfn;
    k->config_read  = assigned_dev_pci_read_config;
    k->config_write = assigned_dev_pci_write_config;

    dc->desc        = "KVM-based PCI passthrough";
    dc->props       = assigned_dev_properties;
    dc->vmsd        = &vmstate_assigned_device;
    dc->reset       = reset_assigned_device;
}

static const TypeInfo assign_info = {
1823
    .name               = "kvm-pci-assign",
1824
    .parent             = TYPE_PCI_DEVICE,
1825
    .instance_size      = sizeof(AssignedDevice),
1826
    .class_init         = assign_class_init,
1827
};
1828

    
1829
static void assign_register_types(void)
1830
{
1831
    type_register_static(&assign_info);
1832
}
1833

    
1834
type_init(assign_register_types)
1835

    
1836
/*
1837
 * Scan the assigned devices for the devices that have an option ROM, and then
1838
 * load the corresponding ROM data to RAM. If an error occurs while loading an
1839
 * option ROM, we just ignore that option ROM and continue with the next one.
1840
 */
1841
static void assigned_dev_load_option_rom(AssignedDevice *dev)
1842
{
1843
    char name[32], rom_file[64];
1844
    FILE *fp;
1845
    uint8_t val;
1846
    struct stat st;
1847
    void *ptr;
1848

    
1849
    /* If loading ROM from file, pci handles it */
1850
    if (dev->dev.romfile || !dev->dev.rom_bar) {
1851
        return;
1852
    }
1853

    
1854
    snprintf(rom_file, sizeof(rom_file),
1855
             "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/rom",
1856
             dev->host.domain, dev->host.bus, dev->host.slot,
1857
             dev->host.function);
1858

    
1859
    if (stat(rom_file, &st)) {
1860
        return;
1861
    }
1862

    
1863
    if (access(rom_file, F_OK)) {
1864
        error_report("pci-assign: Insufficient privileges for %s", rom_file);
1865
        return;
1866
    }
1867

    
1868
    /* Write "1" to the ROM file to enable it */
1869
    fp = fopen(rom_file, "r+");
1870
    if (fp == NULL) {
1871
        return;
1872
    }
1873
    val = 1;
1874
    if (fwrite(&val, 1, 1, fp) != 1) {
1875
        goto close_rom;
1876
    }
1877
    fseek(fp, 0, SEEK_SET);
1878

    
1879
    snprintf(name, sizeof(name), "%s.rom",
1880
            object_get_typename(OBJECT(dev)));
1881
    memory_region_init_ram(&dev->dev.rom, name, st.st_size);
1882
    vmstate_register_ram(&dev->dev.rom, &dev->dev.qdev);
1883
    ptr = memory_region_get_ram_ptr(&dev->dev.rom);
1884
    memset(ptr, 0xff, st.st_size);
1885

    
1886
    if (!fread(ptr, 1, st.st_size, fp)) {
1887
        error_report("pci-assign: Cannot read from host %s\n"
1888
                     "\tDevice option ROM contents are probably invalid "
1889
                     "(check dmesg).\n\tSkip option ROM probe with rombar=0, "
1890
                     "or load from file with romfile=", rom_file);
1891
        memory_region_destroy(&dev->dev.rom);
1892
        goto close_rom;
1893
    }
1894

    
1895
    pci_register_bar(&dev->dev, PCI_ROM_SLOT, 0, &dev->dev.rom);
1896
    dev->dev.has_rom = true;
1897
close_rom:
1898
    /* Write "0" to disable ROM */
1899
    fseek(fp, 0, SEEK_SET);
1900
    val = 0;
1901
    if (!fwrite(&val, 1, 1, fp)) {
1902
        DEBUG("%s\n", "Failed to disable pci-sysfs rom file");
1903
    }
1904
    fclose(fp);
1905
}