/* hw/i386/kvm/pci-assign.c @ revision 25a666d2 */
/*
 * Copyright (c) 2007, Neocleus Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 *
 *  Assign a PCI device from the host to a guest VM.
 *
 *  This implementation uses the classic device assignment interface of KVM
 *  and is only available on x86 hosts. It is expected to be obsoleted by VFIO
 *  based device assignment.
 *
 *  Adapted for KVM (qemu-kvm) by Qumranet. QEMU version was based on qemu-kvm
 *  revision 4144fe9d48. See its repository for the history.
 *
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */
#include <stdio.h>
24
#include <unistd.h>
25
#include <sys/io.h>
26
#include <sys/mman.h>
27
#include <sys/types.h>
28
#include <sys/stat.h>
29
#include "hw/hw.h"
30
#include "hw/i386/pc.h"
31
#include "qemu/error-report.h"
32
#include "ui/console.h"
33
#include "hw/loader.h"
34
#include "monitor/monitor.h"
35
#include "qemu/range.h"
36
#include "sysemu/sysemu.h"
37
#include "hw/pci/pci.h"
38
#include "hw/pci/msi.h"
39
#include "kvm_i386.h"
40

    
41
/* Size of the MMIO page backing the emulated MSI-X table. */
#define MSIX_PAGE_SIZE 0x1000

/* From linux/ioport.h */
#define IORESOURCE_IO       0x00000100  /* Resource type */
#define IORESOURCE_MEM      0x00000200
#define IORESOURCE_IRQ      0x00000400
#define IORESOURCE_DMA      0x00000800
#define IORESOURCE_PREFETCH 0x00002000  /* No side effects */
#define IORESOURCE_MEM_64   0x00100000

//#define DEVICE_ASSIGNMENT_DEBUG

/*
 * Debug tracing to stderr, compiled out by default.  Note that the
 * expansion uses a bare __VA_ARGS__, so every DEBUG() call site must pass
 * at least one argument after the format string.
 */
#ifdef DEVICE_ASSIGNMENT_DEBUG
#define DEBUG(fmt, ...)                                       \
    do {                                                      \
        fprintf(stderr, "%s: " fmt, __func__ , __VA_ARGS__);  \
    } while (0)
#else
#define DEBUG(fmt, ...)
#endif
61

    
62
/* One host BAR as discovered from the sysfs "resource" file. */
typedef struct PCIRegion {
    int type;           /* Memory or port I/O */
    int valid;
    uint64_t base_addr;
    uint64_t size;    /* size of the region */
    int resource_fd;    /* fd on sysfs resourceN, or -1 if not opened */
} PCIRegion;

/* Host-side view of the assigned device: its BARs and config space fd. */
typedef struct PCIDevRegions {
    uint8_t bus, dev, func; /* Bus inside domain, device and function */
    int irq;                /* IRQ number */
    uint16_t region_number; /* number of active regions */

    /* Port I/O or MMIO Regions */
    PCIRegion regions[PCI_NUM_REGIONS - 1];
    int config_fd;          /* fd on sysfs "config" (or monitor-passed fd) */
} PCIDevRegions;

/* Guest-side mapping of one host BAR. */
typedef struct AssignedDevRegion {
    MemoryRegion container;
    MemoryRegion real_iomem;
    union {
        uint8_t *r_virtbase; /* mmapped access address for memory regions */
        uint32_t r_baseport; /* the base guest port for I/O regions */
    } u;
    pcibus_t e_size;    /* emulated size of region in bytes */
    pcibus_t r_size;    /* real size of region in bytes */
    PCIRegion *region;  /* backing host region description */
} AssignedDevRegion;

/* Bits in AssignedDevice::features (set via device properties). */
#define ASSIGNED_DEVICE_PREFER_MSI_BIT  0
#define ASSIGNED_DEVICE_SHARE_INTX_BIT  1

#define ASSIGNED_DEVICE_PREFER_MSI_MASK (1 << ASSIGNED_DEVICE_PREFER_MSI_BIT)
#define ASSIGNED_DEVICE_SHARE_INTX_MASK (1 << ASSIGNED_DEVICE_SHARE_INTX_BIT)

/* In-memory copy of one MSI-X table entry (PCI spec layout, 16 bytes). */
typedef struct MSIXTableEntry {
    uint32_t addr_lo;
    uint32_t addr_hi;
    uint32_t data;
    uint32_t ctrl;
} MSIXTableEntry;

/* Which interrupt delivery mode is currently programmed into KVM. */
typedef enum AssignedIRQType {
    ASSIGNED_IRQ_NONE = 0,
    ASSIGNED_IRQ_INTX_HOST_INTX,  /* guest INTx backed by host INTx */
    ASSIGNED_IRQ_INTX_HOST_MSI,   /* guest INTx backed by host MSI */
    ASSIGNED_IRQ_MSI,
    ASSIGNED_IRQ_MSIX
} AssignedIRQType;
112

    
113
/* Per-device state for a KVM-assigned host PCI device. */
typedef struct AssignedDevice {
    PCIDevice dev;                  /* must be first: DO_UPCAST relies on it */
    PCIHostDeviceAddress host;      /* host domain:bus:slot.function */
    uint32_t dev_id;                /* id handed out by kvm_device_pci_assign */
    uint32_t features;              /* ASSIGNED_DEVICE_*_MASK bits */
    int intpin;
    AssignedDevRegion v_addrs[PCI_NUM_REGIONS - 1];
    PCIDevRegions real_device;
    PCIINTxRoute intx_route;
    AssignedIRQType assigned_irq_type;
    struct {
#define ASSIGNED_DEVICE_CAP_MSI (1 << 0)
#define ASSIGNED_DEVICE_CAP_MSIX (1 << 1)
        uint32_t available;         /* capabilities found on the host device */
#define ASSIGNED_DEVICE_MSI_ENABLED (1 << 0)
#define ASSIGNED_DEVICE_MSIX_ENABLED (1 << 1)
#define ASSIGNED_DEVICE_MSIX_MASKED (1 << 2)
        uint32_t state;             /* current guest-visible enable state */
    } cap;
    /* Per-byte masks: non-zero byte => QEMU emulates that config byte */
    uint8_t emulate_config_read[PCI_CONFIG_SPACE_SIZE];
    uint8_t emulate_config_write[PCI_CONFIG_SPACE_SIZE];
    int msi_virq_nr;                /* number of entries in msi_virq */
    int *msi_virq;                  /* KVM irq routes, -1 = unused slot */
    MSIXTableEntry *msix_table;     /* shadow copy of the MSI-X table */
    hwaddr msix_table_addr;
    uint16_t msix_max;              /* MSI-X table size (number of vectors) */
    MemoryRegion mmio;              /* overlays the MSI-X table page */
    char *configfd_name;            /* optional monitor fd for config space */
    int32_t bootindex;
} AssignedDevice;

static void assigned_dev_update_irq_routing(PCIDevice *dev);

static void assigned_dev_load_option_rom(AssignedDevice *dev);

static void assigned_dev_unregister_msix_mmio(AssignedDevice *dev);
149

    
150
/*
 * Access a port-I/O BAR of the assigned device.
 *
 * If the sysfs resource fd is usable, go through pread/pwrite on it;
 * otherwise fall back to direct inb/outb-family port access at the
 * host base port.  @data non-NULL selects a write (value read from
 * *data), NULL selects a read (value returned).  On a failed pread the
 * result is all-ones for the access width, matching what a real bus
 * returns for an unclaimed access.
 */
static uint64_t assigned_dev_ioport_rw(AssignedDevRegion *dev_region,
                                       hwaddr addr, int size,
                                       uint64_t *data)
{
    uint64_t val = 0;
    int fd = dev_region->region->resource_fd;

    if (fd >= 0) {
        if (data) {
            DEBUG("pwrite data=%" PRIx64 ", size=%d, e_phys=" TARGET_FMT_plx
                  ", addr="TARGET_FMT_plx"\n", *data, size, addr, addr);
            if (pwrite(fd, data, size, addr) != size) {
                error_report("%s - pwrite failed %s",
                             __func__, strerror(errno));
            }
        } else {
            if (pread(fd, &val, size, addr) != size) {
                error_report("%s - pread failed %s",
                             __func__, strerror(errno));
                /* fake an unclaimed-access result */
                val = (1UL << (size * 8)) - 1;
            }
            DEBUG("pread val=%" PRIx64 ", size=%d, e_phys=" TARGET_FMT_plx
                  ", addr=" TARGET_FMT_plx "\n", val, size, addr, addr);
        }
    } else {
        /* direct port access: addr is an offset into the BAR */
        uint32_t port = addr + dev_region->u.r_baseport;

        if (data) {
            DEBUG("out data=%" PRIx64 ", size=%d, e_phys=" TARGET_FMT_plx
                  ", host=%x\n", *data, size, addr, port);
            switch (size) {
            case 1:
                outb(*data, port);
                break;
            case 2:
                outw(*data, port);
                break;
            case 4:
                outl(*data, port);
                break;
            }
        } else {
            switch (size) {
            case 1:
                val = inb(port);
                break;
            case 2:
                val = inw(port);
                break;
            case 4:
                val = inl(port);
                break;
            }
            DEBUG("in data=%" PRIx64 ", size=%d, e_phys=" TARGET_FMT_plx
                  ", host=%x\n", val, size, addr, port);
        }
    }
    return val;
}
209

    
210
/* MemoryRegionOps write callback: forward to the common port-I/O path. */
static void assigned_dev_ioport_write(void *opaque, hwaddr addr,
                                      uint64_t data, unsigned size)
{
    AssignedDevRegion *region = opaque;

    assigned_dev_ioport_rw(region, addr, size, &data);
}
215

    
216
static uint64_t assigned_dev_ioport_read(void *opaque,
217
                                         hwaddr addr, unsigned size)
218
{
219
    return assigned_dev_ioport_rw(opaque, addr, size, NULL);
220
}
221

    
222
/* Byte read from a non-page-aligned ("slow") memory BAR mapping. */
static uint32_t slow_bar_readb(void *opaque, hwaddr addr)
{
    AssignedDevRegion *region = opaque;
    uint32_t value = region->u.r_virtbase[addr];

    DEBUG("addr=0x" TARGET_FMT_plx " val=0x%08x\n", addr, value);

    return value;
}
233

    
234
/* 16-bit read from a non-page-aligned ("slow") memory BAR mapping. */
static uint32_t slow_bar_readw(void *opaque, hwaddr addr)
{
    AssignedDevRegion *region = opaque;
    uint32_t value = *(uint16_t *)(region->u.r_virtbase + addr);

    DEBUG("addr=0x" TARGET_FMT_plx " val=0x%08x\n", addr, value);

    return value;
}
245

    
246
/* 32-bit read from a non-page-aligned ("slow") memory BAR mapping. */
static uint32_t slow_bar_readl(void *opaque, hwaddr addr)
{
    AssignedDevRegion *region = opaque;
    uint32_t value = *(uint32_t *)(region->u.r_virtbase + addr);

    DEBUG("addr=0x" TARGET_FMT_plx " val=0x%08x\n", addr, value);

    return value;
}
257

    
258
/* Byte write to a non-page-aligned ("slow") memory BAR mapping. */
static void slow_bar_writeb(void *opaque, hwaddr addr, uint32_t val)
{
    AssignedDevRegion *region = opaque;

    DEBUG("addr=0x" TARGET_FMT_plx " val=0x%02x\n", addr, val);
    region->u.r_virtbase[addr] = val;
}
266

    
267
/* 16-bit write to a non-page-aligned ("slow") memory BAR mapping. */
static void slow_bar_writew(void *opaque, hwaddr addr, uint32_t val)
{
    AssignedDevRegion *region = opaque;

    DEBUG("addr=0x" TARGET_FMT_plx " val=0x%04x\n", addr, val);
    *(uint16_t *)(region->u.r_virtbase + addr) = val;
}
275

    
276
/* 32-bit write to a non-page-aligned ("slow") memory BAR mapping. */
static void slow_bar_writel(void *opaque, hwaddr addr, uint32_t val)
{
    AssignedDevRegion *region = opaque;

    DEBUG("addr=0x" TARGET_FMT_plx " val=0x%08x\n", addr, val);
    *(uint32_t *)(region->u.r_virtbase + addr) = val;
}
284

    
285
/*
 * Ops for memory BARs whose host mapping is not 4K-aligned/sized and
 * therefore cannot be exposed as direct RAM; every access goes through
 * the slow_bar_* accessors above.
 */
static const MemoryRegionOps slow_bar_ops = {
    .old_mmio = {
        .read = { slow_bar_readb, slow_bar_readw, slow_bar_readl, },
        .write = { slow_bar_writeb, slow_bar_writew, slow_bar_writel, },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};
292

    
293
/*
 * Build the container memory region for memory BAR @region_num of size
 * @e_size and, if the host BAR contains the device's MSI-X table, overlay
 * the emulated MSI-X page (r_dev->mmio) on top of the pass-through mapping
 * at the table's offset.
 */
static void assigned_dev_iomem_setup(PCIDevice *pci_dev, int region_num,
                                     pcibus_t e_size)
{
    AssignedDevice *r_dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    AssignedDevRegion *region = &r_dev->v_addrs[region_num];
    PCIRegion *real_region = &r_dev->real_device.regions[region_num];

    if (e_size > 0) {
        memory_region_init(&region->container, OBJECT(pci_dev),
                           "assigned-dev-container", e_size);
        memory_region_add_subregion(&region->container, 0, &region->real_iomem);

        /* deal with MSI-X MMIO page */
        if (real_region->base_addr <= r_dev->msix_table_addr &&
                real_region->base_addr + real_region->size >
                r_dev->msix_table_addr) {
            uint64_t offset = r_dev->msix_table_addr - real_region->base_addr;

            /* higher priority so the emulated page wins over pass-through */
            memory_region_add_subregion_overlap(&region->container,
                                                offset,
                                                &r_dev->mmio,
                                                1);
        }
    }
}
318

    
319
/* Ops for pass-through port-I/O BARs. */
static const MemoryRegionOps assigned_dev_ioport_ops = {
    .read = assigned_dev_ioport_read,
    .write = assigned_dev_ioport_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
324

    
325
/*
 * Build the container + I/O memory regions for port-I/O BAR @region_num
 * of size @size, routing all accesses through assigned_dev_ioport_ops.
 */
static void assigned_dev_ioport_setup(PCIDevice *pci_dev, int region_num,
                                      pcibus_t size)
{
    AssignedDevice *r_dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    AssignedDevRegion *region = &r_dev->v_addrs[region_num];

    region->e_size = size;
    memory_region_init(&region->container, OBJECT(pci_dev),
                       "assigned-dev-container", size);
    memory_region_init_io(&region->real_iomem, OBJECT(pci_dev),
                          &assigned_dev_ioport_ops, r_dev->v_addrs + region_num,
                          "assigned-dev-iomem", size);
    memory_region_add_subregion(&region->container, 0, &region->real_iomem);
}
339

    
340
/*
 * Read @len (1/2/4) bytes at config-space offset @pos of the real device
 * via its sysfs config fd.  Retries on EINTR/EAGAIN; any other short read
 * is fatal (hw_error does not return).
 */
static uint32_t assigned_dev_pci_read(PCIDevice *d, int pos, int len)
{
    AssignedDevice *pci_dev = DO_UPCAST(AssignedDevice, dev, d);
    /* Zero-initialize: pread only fills @len bytes, so for len < 4 the
     * upper bytes of the returned value would otherwise be uninitialized. */
    uint32_t val = 0;
    ssize_t ret;
    int fd = pci_dev->real_device.config_fd;

again:
    ret = pread(fd, &val, len, pos);
    if (ret != len) {
        if ((ret < 0) && (errno == EINTR || errno == EAGAIN)) {
            goto again;
        }

        hw_error("pci read failed, ret = %zd errno = %d\n", ret, errno);
    }

    return val;
}
359

    
360
/* Convenience wrapper: single-byte config-space read. */
static uint8_t assigned_dev_pci_read_byte(PCIDevice *d, int pos)
{
    uint32_t word = assigned_dev_pci_read(d, pos, 1);

    return (uint8_t)word;
}
364

    
365
/*
 * Write @len (1/2/4) bytes of @val at config-space offset @pos of the
 * real device via its sysfs config fd.  Retries on EINTR/EAGAIN; any
 * other short write is fatal (hw_error does not return).
 */
static void assigned_dev_pci_write(PCIDevice *d, int pos, uint32_t val, int len)
{
    AssignedDevice *pci_dev = DO_UPCAST(AssignedDevice, dev, d);
    ssize_t ret;
    int fd = pci_dev->real_device.config_fd;

again:
    ret = pwrite(fd, &val, len, pos);
    if (ret != len) {
        if ((ret < 0) && (errno == EINTR || errno == EAGAIN)) {
            goto again;
        }

        hw_error("pci write failed, ret = %zd errno = %d\n", ret, errno);
    }
}
381

    
382
/* Mark config bytes [offset, offset+len) as emulated for reads (mask 0xff). */
static void assigned_dev_emulate_config_read(AssignedDevice *dev,
                                             uint32_t offset, uint32_t len)
{
    uint8_t *mask = dev->emulate_config_read + offset;

    memset(mask, 0xff, len);
}
387

    
388
/* Mark config bytes [offset, offset+len) as pass-through for reads (mask 0). */
static void assigned_dev_direct_config_read(AssignedDevice *dev,
                                            uint32_t offset, uint32_t len)
{
    uint8_t *mask = dev->emulate_config_read + offset;

    memset(mask, 0, len);
}
393

    
394
/* Mark config bytes [offset, offset+len) as pass-through for writes (mask 0). */
static void assigned_dev_direct_config_write(AssignedDevice *dev,
                                             uint32_t offset, uint32_t len)
{
    uint8_t *mask = dev->emulate_config_write + offset;

    memset(mask, 0, len);
}
399

    
400
/*
 * Walk the real device's capability list looking for capability id @cap,
 * starting at @start (or at PCI_CAPABILITY_LIST when @start is 0).
 * Returns the config-space offset of the capability, or 0 if not found.
 * max_cap bounds the walk so a corrupted/looping list cannot hang us.
 */
static uint8_t pci_find_cap_offset(PCIDevice *d, uint8_t cap, uint8_t start)
{
    int id;
    int max_cap = 48;
    int pos = start ? start : PCI_CAPABILITY_LIST;
    int status;

    status = assigned_dev_pci_read_byte(d, PCI_STATUS);
    if ((status & PCI_STATUS_CAP_LIST) == 0) {
        return 0;
    }

    while (max_cap--) {
        /* follow the "next capability" pointer stored at pos */
        pos = assigned_dev_pci_read_byte(d, pos);
        if (pos < 0x40) {
            /* pointers below the standard header end terminate the list */
            break;
        }

        pos &= ~3;
        id = assigned_dev_pci_read_byte(d, pos + PCI_CAP_LIST_ID);

        if (id == 0xff) {
            break;
        }
        if (id == cap) {
            return pos;
        }

        pos += PCI_CAP_LIST_NEXT;
    }
    return 0;
}
432

    
433
/*
 * Register the host device's BARs with the guest PCI core.
 *
 * Memory BARs are mmapped from their sysfs resource fd; page-aligned ones
 * are exposed as direct RAM, unaligned ones go through slow_bar_ops.
 * Port-I/O BARs are exposed through assigned_dev_ioport_ops after probing
 * that the kernel supports resource-fd ioport access.
 *
 * Returns 0 on success, -1 if an mmap fails.
 */
static int assigned_dev_register_regions(PCIRegion *io_regions,
                                         unsigned long regions_num,
                                         AssignedDevice *pci_dev)
{
    uint32_t i;
    PCIRegion *cur_region = io_regions;

    for (i = 0; i < regions_num; i++, cur_region++) {
        if (!cur_region->valid) {
            continue;
        }

        /* handle memory io regions */
        if (cur_region->type & IORESOURCE_MEM) {
            int t = PCI_BASE_ADDRESS_SPACE_MEMORY;
            if (cur_region->type & IORESOURCE_PREFETCH) {
                t |= PCI_BASE_ADDRESS_MEM_PREFETCH;
            }
            if (cur_region->type & IORESOURCE_MEM_64) {
                t |= PCI_BASE_ADDRESS_MEM_TYPE_64;
            }

            /* map physical memory */
            pci_dev->v_addrs[i].u.r_virtbase = mmap(NULL, cur_region->size,
                                                    PROT_WRITE | PROT_READ,
                                                    MAP_SHARED,
                                                    cur_region->resource_fd,
                                                    (off_t)0);

            if (pci_dev->v_addrs[i].u.r_virtbase == MAP_FAILED) {
                pci_dev->v_addrs[i].u.r_virtbase = NULL;
                error_report("%s: Error: Couldn't mmap 0x%" PRIx64 "!",
                             __func__, cur_region->base_addr);
                return -1;
            }

            pci_dev->v_addrs[i].r_size = cur_region->size;
            pci_dev->v_addrs[i].e_size = 0;

            /* add offset */
            pci_dev->v_addrs[i].u.r_virtbase +=
                (cur_region->base_addr & 0xFFF);

            if (cur_region->size & 0xFFF) {
                /* unaligned BAR: cannot map as RAM, use trapped access */
                error_report("PCI region %d at address 0x%" PRIx64 " has "
                             "size 0x%" PRIx64 ", which is not a multiple of "
                             "4K.  You might experience some performance hit "
                             "due to that.",
                             i, cur_region->base_addr, cur_region->size);
                memory_region_init_io(&pci_dev->v_addrs[i].real_iomem,
                                      OBJECT(pci_dev), &slow_bar_ops,
                                      &pci_dev->v_addrs[i],
                                      "assigned-dev-slow-bar",
                                      cur_region->size);
            } else {
                /* aligned BAR: expose the mmapped pages directly as RAM */
                void *virtbase = pci_dev->v_addrs[i].u.r_virtbase;
                char name[32];
                snprintf(name, sizeof(name), "%s.bar%d",
                         object_get_typename(OBJECT(pci_dev)), i);
                memory_region_init_ram_ptr(&pci_dev->v_addrs[i].real_iomem,
                                           OBJECT(pci_dev), name,
                                           cur_region->size, virtbase);
                vmstate_register_ram(&pci_dev->v_addrs[i].real_iomem,
                                     &pci_dev->dev.qdev);
            }

            assigned_dev_iomem_setup(&pci_dev->dev, i, cur_region->size);
            pci_register_bar((PCIDevice *) pci_dev, i, t,
                             &pci_dev->v_addrs[i].container);
            continue;
        } else {
            /* handle port io regions */
            uint32_t val;
            int ret;

            /* Test kernel support for ioport resource read/write.  Old
             * kernels return EIO.  New kernels only allow 1/2/4 byte reads
             * so should return EINVAL for a 3 byte read */
            ret = pread(pci_dev->v_addrs[i].region->resource_fd, &val, 3, 0);
            if (ret >= 0) {
                error_report("Unexpected return from I/O port read: %d", ret);
                abort();
            } else if (errno != EINVAL) {
                error_report("Kernel doesn't support ioport resource "
                             "access, hiding this region.");
                close(pci_dev->v_addrs[i].region->resource_fd);
                cur_region->valid = 0;
                continue;
            }

            pci_dev->v_addrs[i].u.r_baseport = cur_region->base_addr;
            pci_dev->v_addrs[i].r_size = cur_region->size;
            pci_dev->v_addrs[i].e_size = 0;

            assigned_dev_ioport_setup(&pci_dev->dev, i, cur_region->size);
            pci_register_bar((PCIDevice *) pci_dev, i,
                             PCI_BASE_ADDRESS_SPACE_IO,
                             &pci_dev->v_addrs[i].container);
        }
    }

    /* success */
    return 0;
}
537

    
538
/*
 * Read a single numeric id (e.g. vendor/device) from the sysfs file
 * @devpath@idname into *val.  Returns 0 on success, -1 on open or
 * parse failure.
 */
static int get_real_id(const char *devpath, const char *idname, uint16_t *val)
{
    char path[128];
    long id;
    FILE *f;
    int ret = -1;

    snprintf(path, sizeof(path), "%s%s", devpath, idname);
    f = fopen(path, "r");
    if (f == NULL) {
        error_report("%s: %s: %m", __func__, path);
        return -1;
    }
    if (fscanf(f, "%li\n", &id) == 1) {
        *val = id;
        ret = 0;
    }
    fclose(f);

    return ret;
}
560

    
561
/* Read the host device's PCI vendor id from sysfs. */
static int get_real_vendor_id(const char *devpath, uint16_t *val)
{
    return get_real_id(devpath, "vendor", val);
}
565

    
566
/* Read the host device's PCI device id from sysfs. */
static int get_real_device_id(const char *devpath, uint16_t *val)
{
    return get_real_id(devpath, "device", val);
}
570

    
571
/*
 * Open and inventory the host device via sysfs: config space, BAR
 * resources, and vendor/device ids.  Fills pci_dev->real_device and the
 * guest-visible config space copy.  Returns 0 on success, 1 on failure.
 */
static int get_real_device(AssignedDevice *pci_dev)
{
    char dir[128], name[128];
    int fd, r = 0, v;
    FILE *f;
    uint64_t start, end, size, flags;
    uint16_t id;
    PCIRegion *rp;
    PCIDevRegions *dev = &pci_dev->real_device;

    dev->region_number = 0;

    snprintf(dir, sizeof(dir), "/sys/bus/pci/devices/%04x:%02x:%02x.%x/",
             pci_dev->host.domain, pci_dev->host.bus,
             pci_dev->host.slot, pci_dev->host.function);

    snprintf(name, sizeof(name), "%sconfig", dir);

    /* config fd either comes from the monitor or from sysfs directly */
    if (pci_dev->configfd_name && *pci_dev->configfd_name) {
        dev->config_fd = monitor_handle_fd_param(cur_mon, pci_dev->configfd_name);
        if (dev->config_fd < 0) {
            return 1;
        }
    } else {
        dev->config_fd = open(name, O_RDWR);

        if (dev->config_fd == -1) {
            error_report("%s: %s: %m", __func__, name);
            return 1;
        }
    }
again:
    r = read(dev->config_fd, pci_dev->dev.config,
             pci_config_size(&pci_dev->dev));
    if (r < 0) {
        if (errno == EINTR || errno == EAGAIN) {
            goto again;
        }
        error_report("%s: read failed, errno = %d", __func__, errno);
    }

    /* Restore or clear multifunction, this is always controlled by qemu */
    if (pci_dev->dev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        pci_dev->dev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    } else {
        pci_dev->dev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /* Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here. */
    memset(&pci_dev->dev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&pci_dev->dev.config[PCI_ROM_ADDRESS], 0, 4);

    snprintf(name, sizeof(name), "%sresource", dir);

    f = fopen(name, "r");
    if (f == NULL) {
        error_report("%s: %s: %m", __func__, name);
        return 1;
    }

    /* each line of the sysfs "resource" file is "start end flags" */
    for (r = 0; r < PCI_ROM_SLOT; r++) {
        if (fscanf(f, "%" SCNi64 " %" SCNi64 " %" SCNi64 "\n",
                   &start, &end, &flags) != 3) {
            break;
        }

        rp = dev->regions + r;
        rp->valid = 0;
        rp->resource_fd = -1;
        size = end - start + 1;
        flags &= IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH
                 | IORESOURCE_MEM_64;
        if (size == 0 || (flags & ~IORESOURCE_PREFETCH) == 0) {
            continue;
        }
        if (flags & IORESOURCE_MEM) {
            flags &= ~IORESOURCE_IO;
        } else {
            flags &= ~IORESOURCE_PREFETCH;
        }
        snprintf(name, sizeof(name), "%sresource%d", dir, r);
        fd = open(name, O_RDWR);
        if (fd == -1) {
            /* BAR without an accessible resource file is simply skipped */
            continue;
        }
        rp->resource_fd = fd;

        rp->type = flags;
        rp->valid = 1;
        rp->base_addr = start;
        rp->size = size;
        pci_dev->v_addrs[r].region = rp;
        DEBUG("region %d size %" PRIu64 " start 0x%" PRIx64
              " type %d resource_fd %d\n",
              r, rp->size, start, rp->type, rp->resource_fd);
    }

    fclose(f);

    /* read and fill vendor ID */
    v = get_real_vendor_id(dir, &id);
    if (v) {
        return 1;
    }
    pci_dev->dev.config[0] = id & 0xff;
    pci_dev->dev.config[1] = (id & 0xff00) >> 8;

    /* read and fill device ID */
    v = get_real_device_id(dir, &id);
    if (v) {
        return 1;
    }
    pci_dev->dev.config[2] = id & 0xff;
    pci_dev->dev.config[3] = (id & 0xff00) >> 8;

    pci_word_test_and_clear_mask(pci_dev->emulate_config_write + PCI_COMMAND,
                                 PCI_COMMAND_MASTER | PCI_COMMAND_INTX_DISABLE);

    dev->region_number = r;
    return 0;
}
694

    
695
/* Release all KVM MSI routes held by the device and drop the array. */
static void free_msi_virqs(AssignedDevice *dev)
{
    int n;

    for (n = 0; n < dev->msi_virq_nr; n++) {
        int virq = dev->msi_virq[n];

        if (virq < 0) {
            continue;
        }
        kvm_irqchip_release_virq(kvm_state, virq);
        dev->msi_virq[n] = -1;
    }
    g_free(dev->msi_virq);
    dev->msi_virq = NULL;
    dev->msi_virq_nr = 0;
}
709

    
710
/*
 * Tear down everything built by get_real_device/assigned_dev_register_regions:
 * MSI-X MMIO, per-BAR memory regions and mmaps, resource fds, the config fd,
 * and any MSI virqs.
 */
static void free_assigned_device(AssignedDevice *dev)
{
    int i;

    if (dev->cap.available & ASSIGNED_DEVICE_CAP_MSIX) {
        assigned_dev_unregister_msix_mmio(dev);
    }
    for (i = 0; i < dev->real_device.region_number; i++) {
        PCIRegion *pci_region = &dev->real_device.regions[i];
        AssignedDevRegion *region = &dev->v_addrs[i];

        if (!pci_region->valid) {
            continue;
        }
        if (pci_region->type & IORESOURCE_IO) {
            /* r_baseport non-zero => the I/O regions were initialized */
            if (region->u.r_baseport) {
                memory_region_del_subregion(&region->container,
                                            &region->real_iomem);
                memory_region_destroy(&region->real_iomem);
                memory_region_destroy(&region->container);
            }
        } else if (pci_region->type & IORESOURCE_MEM) {
            /* r_virtbase non-NULL => the BAR was successfully mmapped */
            if (region->u.r_virtbase) {
                memory_region_del_subregion(&region->container,
                                            &region->real_iomem);

                /* Remove MSI-X table subregion */
                if (pci_region->base_addr <= dev->msix_table_addr &&
                    pci_region->base_addr + pci_region->size >
                    dev->msix_table_addr) {
                    memory_region_del_subregion(&region->container,
                                                &dev->mmio);
                }

                memory_region_destroy(&region->real_iomem);
                memory_region_destroy(&region->container);
                /* unmap the page-rounded size that was originally mapped */
                if (munmap(region->u.r_virtbase,
                           (pci_region->size + 0xFFF) & 0xFFFFF000)) {
                    error_report("Failed to unmap assigned device region: %s",
                                 strerror(errno));
                }
            }
        }
        if (pci_region->resource_fd >= 0) {
            close(pci_region->resource_fd);
        }
    }

    if (dev->real_device.config_fd >= 0) {
        close(dev->real_device.config_fd);
    }

    free_msi_virqs(dev);
}
764

    
765
/*
 * After a failed (-EBUSY) assignment, inspect sysfs to find which host
 * driver currently owns the device and print step-by-step pci-stub
 * rebinding instructions for the user.
 */
static void assign_failed_examine(AssignedDevice *dev)
{
    char name[PATH_MAX], dir[PATH_MAX], driver[PATH_MAX] = {}, *ns;
    uint16_t vendor_id, device_id;
    int r;

    snprintf(dir, sizeof(dir), "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
            dev->host.domain, dev->host.bus, dev->host.slot,
            dev->host.function);

    snprintf(name, sizeof(name), "%sdriver", dir);

    /* "driver" is a symlink to the bound driver's sysfs directory */
    r = readlink(name, driver, sizeof(driver));
    if ((r <= 0) || r >= sizeof(driver)) {
        goto fail;
    }

    /* driver name is the last path component of the link target */
    ns = strrchr(driver, '/');
    if (!ns) {
        goto fail;
    }

    ns++;

    if (get_real_vendor_id(dir, &vendor_id) ||
        get_real_device_id(dir, &device_id)) {
        goto fail;
    }

    error_printf("*** The driver '%s' is occupying your device "
        "%04x:%02x:%02x.%x.\n"
        "***\n"
        "*** You can try the following commands to free it:\n"
        "***\n"
        "*** $ echo \"%04x %04x\" > /sys/bus/pci/drivers/pci-stub/new_id\n"
        "*** $ echo \"%04x:%02x:%02x.%x\" > /sys/bus/pci/drivers/%s/unbind\n"
        "*** $ echo \"%04x:%02x:%02x.%x\" > /sys/bus/pci/drivers/"
        "pci-stub/bind\n"
        "*** $ echo \"%04x %04x\" > /sys/bus/pci/drivers/pci-stub/remove_id\n"
        "***",
        ns, dev->host.domain, dev->host.bus, dev->host.slot,
        dev->host.function, vendor_id, device_id,
        dev->host.domain, dev->host.bus, dev->host.slot, dev->host.function,
        ns, dev->host.domain, dev->host.bus, dev->host.slot,
        dev->host.function, vendor_id, device_id);

    return;

fail:
    error_report("Couldn't find out why.");
}
816

    
817
/*
 * Ask KVM to assign the host device to this VM.  Verifies IOMMU and
 * (for non-zero domains) PCI-segment support first; on -EBUSY, prints
 * a diagnosis of which host driver holds the device.
 * Returns 0 on success or a negative errno.
 */
static int assign_device(AssignedDevice *dev)
{
    uint32_t flags = KVM_DEV_ASSIGN_ENABLE_IOMMU;
    int r;

    /* Only pass non-zero PCI segment to capable module */
    if (!kvm_check_extension(kvm_state, KVM_CAP_PCI_SEGMENT) &&
        dev->host.domain) {
        error_report("Can't assign device inside non-zero PCI segment "
                     "as this KVM module doesn't support it.");
        return -ENODEV;
    }

    if (!kvm_check_extension(kvm_state, KVM_CAP_IOMMU)) {
        error_report("No IOMMU found.  Unable to assign device \"%s\"",
                     dev->dev.qdev.id);
        return -ENODEV;
    }

    /* PCI 2.3 INTx sharing requires kernel support for INTx masking */
    if (dev->features & ASSIGNED_DEVICE_SHARE_INTX_MASK &&
        kvm_has_intx_set_mask()) {
        flags |= KVM_DEV_ASSIGN_PCI_2_3;
    }

    r = kvm_device_pci_assign(kvm_state, &dev->host, flags, &dev->dev_id);
    if (r < 0) {
        error_report("Failed to assign device \"%s\" : %s",
                     dev->dev.qdev.id, strerror(-r));

        switch (r) {
        case -EBUSY:
            assign_failed_examine(dev);
            break;
        default:
            break;
        }
    }
    return r;
}
856

    
857
/* Device assignment needs the in-kernel irqchip; report and refuse if absent. */
static bool check_irqchip_in_kernel(void)
{
    if (!kvm_irqchip_in_kernel()) {
        error_report("pci-assign: error: requires KVM with in-kernel irqchip "
                     "enabled");
        return false;
    }
    return true;
}
866

    
867
/*
 * (Re-)establish INTx interrupt forwarding for an assigned device.
 *
 * Resolves the device's current INTx->GSI route, and if it changed, first
 * tears down whatever interrupt mode was previously assigned (INTx, host-MSI
 * backed INTx, MSI or MSI-X), then assigns the new route.  If host-side INTx
 * sharing fails with -EIO and the device has MSI, retries using host MSI to
 * deliver the guest's INTx.  Returns 0 on success or when INTx is unused,
 * negative errno otherwise.
 */
static int assign_intx(AssignedDevice *dev)
{
    AssignedIRQType new_type;
    PCIINTxRoute intx_route;
    bool intx_host_msi;
    int r;

    /* Interrupt PIN 0 means don't use INTx */
    if (assigned_dev_pci_read_byte(&dev->dev, PCI_INTERRUPT_PIN) == 0) {
        pci_device_set_intx_routing_notifier(&dev->dev, NULL);
        return 0;
    }

    if (!check_irqchip_in_kernel()) {
        return -ENOTSUP;
    }

    /* Re-run this function whenever the chipset reroutes the pin */
    pci_device_set_intx_routing_notifier(&dev->dev,
                                         assigned_dev_update_irq_routing);

    intx_route = pci_device_route_intx_to_irq(&dev->dev, dev->intpin);
    assert(intx_route.mode != PCI_INTX_INVERTED);

    if (!pci_intx_route_changed(&dev->intx_route, &intx_route)) {
        return 0;
    }

    /* Route changed: deassign whatever interrupt mode was active before */
    switch (dev->assigned_irq_type) {
    case ASSIGNED_IRQ_INTX_HOST_INTX:
    case ASSIGNED_IRQ_INTX_HOST_MSI:
        intx_host_msi = dev->assigned_irq_type == ASSIGNED_IRQ_INTX_HOST_MSI;
        r = kvm_device_intx_deassign(kvm_state, dev->dev_id, intx_host_msi);
        break;
    case ASSIGNED_IRQ_MSI:
        r = kvm_device_msi_deassign(kvm_state, dev->dev_id);
        break;
    case ASSIGNED_IRQ_MSIX:
        r = kvm_device_msix_deassign(kvm_state, dev->dev_id);
        break;
    default:
        r = 0;
        break;
    }
    if (r) {
        perror("assign_intx: deassignment of previous interrupt failed");
    }
    dev->assigned_irq_type = ASSIGNED_IRQ_NONE;

    if (intx_route.mode == PCI_INTX_DISABLED) {
        dev->intx_route = intx_route;
        return 0;
    }

retry:
    /* Host MSI can back the guest's INTx when sharing is preferred/forced */
    if (dev->features & ASSIGNED_DEVICE_PREFER_MSI_MASK &&
        dev->cap.available & ASSIGNED_DEVICE_CAP_MSI) {
        intx_host_msi = true;
        new_type = ASSIGNED_IRQ_INTX_HOST_MSI;
    } else {
        intx_host_msi = false;
        new_type = ASSIGNED_IRQ_INTX_HOST_INTX;
    }

    r = kvm_device_intx_assign(kvm_state, dev->dev_id, intx_host_msi,
                               intx_route.irq);
    if (r < 0) {
        if (r == -EIO && !(dev->features & ASSIGNED_DEVICE_PREFER_MSI_MASK) &&
            dev->cap.available & ASSIGNED_DEVICE_CAP_MSI) {
            /* Retry with host-side MSI. There might be an IRQ conflict and
             * either the kernel or the device doesn't support sharing. */
            error_report("Host-side INTx sharing not supported, "
                         "using MSI instead");
            error_printf("Some devices do not work properly in this mode.\n");
            dev->features |= ASSIGNED_DEVICE_PREFER_MSI_MASK;
            goto retry;
        }
        error_report("Failed to assign irq for \"%s\": %s",
                     dev->dev.qdev.id, strerror(-r));
        error_report("Perhaps you are assigning a device "
                     "that shares an IRQ with another device?");
        return r;
    }

    dev->intx_route = intx_route;
    dev->assigned_irq_type = new_type;
    return r;
}
954

    
955
/*
 * Release the KVM-side assignment of this device.  A failure here would mean
 * our bookkeeping of dev_id is broken, so treat it as a programming error.
 */
static void deassign_device(AssignedDevice *dev)
{
    int ret = kvm_device_pci_deassign(kvm_state, dev->dev_id);
    assert(ret == 0);
}
962

    
963
/* The pci config space got updated. Check if irq numbers have changed
964
 * for our devices
965
 */
966
static void assigned_dev_update_irq_routing(PCIDevice *dev)
967
{
968
    AssignedDevice *assigned_dev = DO_UPCAST(AssignedDevice, dev, dev);
969
    Error *err = NULL;
970
    int r;
971

    
972
    r = assign_intx(assigned_dev);
973
    if (r < 0) {
974
        qdev_unplug(&dev->qdev, &err);
975
        assert(!err);
976
    }
977
}
978

    
979
/*
 * React to a guest write to the MSI control register: tear down the current
 * interrupt assignment if MSI was (or is being) used, then either program a
 * new MSI route through KVM or fall back to INTx when the guest disabled MSI.
 */
static void assigned_dev_update_msi(PCIDevice *pci_dev)
{
    AssignedDevice *assigned_dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    uint8_t ctrl_byte = pci_get_byte(pci_dev->config + pci_dev->msi_cap +
                                     PCI_MSI_FLAGS);
    int r;

    /* Some guests gratuitously disable MSI even if they're not using it,
     * try to catch this by only deassigning irqs if the guest is using
     * MSI or intends to start. */
    if (assigned_dev->assigned_irq_type == ASSIGNED_IRQ_MSI ||
        (ctrl_byte & PCI_MSI_FLAGS_ENABLE)) {
        r = kvm_device_msi_deassign(kvm_state, assigned_dev->dev_id);
        /* -ENXIO means no assigned irq */
        if (r && r != -ENXIO) {
            perror("assigned_dev_update_msi: deassign irq");
        }

        free_msi_virqs(assigned_dev);

        assigned_dev->assigned_irq_type = ASSIGNED_IRQ_NONE;
        pci_device_set_intx_routing_notifier(pci_dev, NULL);
    }

    if (ctrl_byte & PCI_MSI_FLAGS_ENABLE) {
        /* Guest enabled MSI: route vector 0 through the in-kernel irqchip */
        MSIMessage msg = msi_get_message(pci_dev, 0);
        int virq;

        virq = kvm_irqchip_add_msi_route(kvm_state, msg);
        if (virq < 0) {
            perror("assigned_dev_update_msi: kvm_irqchip_add_msi_route");
            return;
        }

        assigned_dev->msi_virq = g_malloc(sizeof(*assigned_dev->msi_virq));
        assigned_dev->msi_virq_nr = 1;
        assigned_dev->msi_virq[0] = virq;
        if (kvm_device_msi_assign(kvm_state, assigned_dev->dev_id, virq) < 0) {
            perror("assigned_dev_update_msi: kvm_device_msi_assign");
        }

        /* MSI active implies INTx is off */
        assigned_dev->intx_route.mode = PCI_INTX_DISABLED;
        assigned_dev->intx_route.irq = -1;
        assigned_dev->assigned_irq_type = ASSIGNED_IRQ_MSI;
    } else {
        /* MSI disabled: return to INTx delivery */
        assign_intx(assigned_dev);
    }
}
1027

    
1028
/*
 * The guest rewrote the MSI address/data while MSI stayed enabled: push the
 * new message into the existing KVM irq route.  No-op in any other state.
 */
static void assigned_dev_update_msi_msg(PCIDevice *pci_dev)
{
    AssignedDevice *adev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    uint8_t ctrl = pci_get_byte(pci_dev->config + pci_dev->msi_cap +
                                PCI_MSI_FLAGS);

    if ((ctrl & PCI_MSI_FLAGS_ENABLE) &&
        adev->assigned_irq_type == ASSIGNED_IRQ_MSI) {
        kvm_irqchip_update_msi_route(kvm_state, adev->msi_virq[0],
                                     msi_get_message(pci_dev, 0));
    }
}
1042

    
1043
/* An MSI-X vector is masked when bit 0 of its (little-endian) vector
 * control word is set. */
static bool assigned_dev_msix_masked(MSIXTableEntry *entry)
{
    uint32_t mask_bit = cpu_to_le32(0x1);

    return (entry->ctrl & mask_bit) != 0;
}
1047

    
1048
/*
1049
 * When MSI-X is first enabled the vector table typically has all the
1050
 * vectors masked, so we can't use that as the obvious test to figure out
1051
 * how many vectors to initially enable.  Instead we look at the data field
1052
 * because this is what worked for pci-assign for a long time.  This makes
1053
 * sure the physical MSI-X state tracks the guest's view, which is important
1054
 * for some VF/PF and PF/fw communication channels.
1055
 */
1056
static bool assigned_dev_msix_skipped(MSIXTableEntry *entry)
1057
{
1058
    return !entry->data;
1059
}
1060

    
1061
/*
 * Walk the shadow MSI-X table and program KVM accordingly: allocate one
 * irq routing entry (virq) per usable vector and bind it to the device's
 * MSI-X slot.  Vectors with a zero data field are considered unused (see
 * assigned_dev_msix_skipped).  Returns 0 on success or if no vectors are
 * usable, negative errno on failure.
 */
static int assigned_dev_update_msix_mmio(PCIDevice *pci_dev)
{
    AssignedDevice *adev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    uint16_t entries_nr = 0;
    int i, r = 0;
    MSIXTableEntry *entry = adev->msix_table;
    MSIMessage msg;

    /* Get the usable entry number for allocating */
    for (i = 0; i < adev->msix_max; i++, entry++) {
        if (assigned_dev_msix_skipped(entry)) {
            continue;
        }
        entries_nr++;
    }

    DEBUG("MSI-X entries: %d\n", entries_nr);

    /* It's valid to enable MSI-X with all entries masked */
    if (!entries_nr) {
        return 0;
    }

    r = kvm_device_msix_init_vectors(kvm_state, adev->dev_id, entries_nr);
    if (r != 0) {
        error_report("fail to set MSI-X entry number for MSIX! %s",
                     strerror(-r));
        return r;
    }

    /* Drop any virqs from a previous enable before allocating fresh ones */
    free_msi_virqs(adev);

    adev->msi_virq_nr = adev->msix_max;
    adev->msi_virq = g_malloc(adev->msix_max * sizeof(*adev->msi_virq));

    entry = adev->msix_table;
    for (i = 0; i < adev->msix_max; i++, entry++) {
        adev->msi_virq[i] = -1; /* -1 marks an unassigned slot */

        if (assigned_dev_msix_skipped(entry)) {
            continue;
        }

        msg.address = entry->addr_lo | ((uint64_t)entry->addr_hi << 32);
        msg.data = entry->data;
        r = kvm_irqchip_add_msi_route(kvm_state, msg);
        if (r < 0) {
            return r;
        }
        adev->msi_virq[i] = r;

        DEBUG("MSI-X vector %d, gsi %d, addr %08x_%08x, data %08x\n", i,
              r, entry->addr_hi, entry->addr_lo, entry->data);

        r = kvm_device_msix_set_vector(kvm_state, adev->dev_id, i,
                                       adev->msi_virq[i]);
        if (r) {
            error_report("fail to set MSI-X entry! %s", strerror(-r));
            break;
        }
    }

    return r;
}
1125

    
1126
/*
 * React to a guest write to the MSI-X control register: tear down the
 * current assignment if MSI-X was (or is being) used, then either program
 * the full vector table through KVM or fall back to INTx when the guest
 * disabled MSI-X.
 */
static void assigned_dev_update_msix(PCIDevice *pci_dev)
{
    AssignedDevice *assigned_dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    uint16_t ctrl_word = pci_get_word(pci_dev->config + pci_dev->msix_cap +
                                      PCI_MSIX_FLAGS);
    int r;

    /* Some guests gratuitously disable MSIX even if they're not using it,
     * try to catch this by only deassigning irqs if the guest is using
     * MSIX or intends to start. */
    if ((assigned_dev->assigned_irq_type == ASSIGNED_IRQ_MSIX) ||
        (ctrl_word & PCI_MSIX_FLAGS_ENABLE)) {
        r = kvm_device_msix_deassign(kvm_state, assigned_dev->dev_id);
        /* -ENXIO means no assigned irq */
        if (r && r != -ENXIO) {
            perror("assigned_dev_update_msix: deassign irq");
        }

        free_msi_virqs(assigned_dev);

        assigned_dev->assigned_irq_type = ASSIGNED_IRQ_NONE;
        pci_device_set_intx_routing_notifier(pci_dev, NULL);
    }

    if (ctrl_word & PCI_MSIX_FLAGS_ENABLE) {
        if (assigned_dev_update_msix_mmio(pci_dev) < 0) {
            perror("assigned_dev_update_msix_mmio");
            return;
        }

        /* Only attach to KVM when at least one vector was routed */
        if (assigned_dev->msi_virq_nr > 0) {
            if (kvm_device_msix_assign(kvm_state, assigned_dev->dev_id) < 0) {
                perror("assigned_dev_enable_msix: assign irq");
                return;
            }
        }
        /* MSI-X active implies INTx is off */
        assigned_dev->intx_route.mode = PCI_INTX_DISABLED;
        assigned_dev->intx_route.irq = -1;
        assigned_dev->assigned_irq_type = ASSIGNED_IRQ_MSIX;
    } else {
        assign_intx(assigned_dev);
    }
}
1169

    
1170
/*
 * Config-space read handler: merge emulated bytes (per the per-byte
 * emulate_config_read bitmap) with bytes read live from the host device.
 * Fully-emulated accesses skip touching real hardware entirely.
 */
static uint32_t
assigned_dev_pci_read_config(PCIDevice *pci_dev, uint32_t address, int len)
{
    AssignedDevice *adev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    uint32_t emulated = pci_default_read_config(pci_dev, address, len);
    uint32_t mask = 0;
    uint32_t all_bits = 0xffffffff >> (32 - len * 8);
    uint32_t hw_val;

    memcpy(&mask, adev->emulate_config_read + address, len);
    mask = le32_to_cpu(mask);

    if (mask == all_bits) {
        return emulated;
    }

    hw_val = assigned_dev_pci_read(pci_dev, address, len);
    return (emulated & mask) | (hw_val & ~mask);
}
1190

    
1191
/*
 * Config-space write handler.  Updates the emulated config first, then
 * reacts to writes that touch interesting registers (INTx disable bit,
 * MSI/MSI-X control and MSI message), and finally forwards any bytes not
 * covered by the emulate_config_write bitmap to the host device.
 */
static void assigned_dev_pci_write_config(PCIDevice *pci_dev, uint32_t address,
                                          uint32_t val, int len)
{
    AssignedDevice *assigned_dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    uint16_t old_cmd = pci_get_word(pci_dev->config + PCI_COMMAND);
    uint32_t emulate_mask, full_emulation_mask;
    int ret;

    pci_default_write_config(pci_dev, address, val, len);

    /* Mirror guest toggles of PCI_COMMAND_INTX_DISABLE into the kernel */
    if (kvm_has_intx_set_mask() &&
        range_covers_byte(address, len, PCI_COMMAND + 1)) {
        bool intx_masked = (pci_get_word(pci_dev->config + PCI_COMMAND) &
                            PCI_COMMAND_INTX_DISABLE);

        if (intx_masked != !!(old_cmd & PCI_COMMAND_INTX_DISABLE)) {
            ret = kvm_device_intx_set_mask(kvm_state, assigned_dev->dev_id,
                                           intx_masked);
            if (ret) {
                perror("assigned_dev_pci_write_config: set intx mask");
            }
        }
    }
    if (assigned_dev->cap.available & ASSIGNED_DEVICE_CAP_MSI) {
        if (range_covers_byte(address, len,
                              pci_dev->msi_cap + PCI_MSI_FLAGS)) {
            assigned_dev_update_msi(pci_dev);
        } else if (ranges_overlap(address, len, /* 32bit MSI only */
                                  pci_dev->msi_cap + PCI_MSI_ADDRESS_LO, 6)) {
            assigned_dev_update_msi_msg(pci_dev);
        }
    }
    if (assigned_dev->cap.available & ASSIGNED_DEVICE_CAP_MSIX) {
        if (range_covers_byte(address, len,
                              pci_dev->msix_cap + PCI_MSIX_FLAGS + 1)) {
            assigned_dev_update_msix(pci_dev);
        }
    }

    emulate_mask = 0;
    memcpy(&emulate_mask, assigned_dev->emulate_config_write + address, len);
    emulate_mask = le32_to_cpu(emulate_mask);

    full_emulation_mask = 0xffffffff >> (32 - len * 8);

    /* Forward non-emulated bytes to hardware; emulated bytes keep the
     * hardware's current value so the write is effectively masked. */
    if (emulate_mask != full_emulation_mask) {
        if (emulate_mask) {
            val &= ~emulate_mask;
            val |= assigned_dev_pci_read(pci_dev, address, len) & emulate_mask;
        }
        assigned_dev_pci_write(pci_dev, address, val, len);
    }
}
1244

    
1245
/*
 * Pass the body of a capability (@offset, @len) straight through to hardware
 * on reads, but keep the next-capability pointer emulated so the guest walks
 * our virtual capability list.  NOTE: the emulate call must come second, as
 * it overrides one byte of the direct-read range just configured.
 */
static void assigned_dev_setup_cap_read(AssignedDevice *dev, uint32_t offset,
                                        uint32_t len)
{
    assigned_dev_direct_config_read(dev, offset, len);
    assigned_dev_emulate_config_read(dev, offset + PCI_CAP_LIST_NEXT, 1);
}
1251

    
1252
/*
 * Build the virtual PCI capability list exposed to the guest.
 *
 * The hardware's capability chain is not passed through wholesale; instead
 * each supported capability (MSI, MSI-X, PM, PCIe, PCI-X, VPD, vendor) is
 * re-added to the emulated config space with sanitized contents, and the
 * read/write emulation bitmaps are configured per capability.  Returns 0 on
 * success, negative errno on failure.
 */
static int assigned_device_pci_cap_init(PCIDevice *pci_dev)
{
    AssignedDevice *dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    PCIRegion *pci_region = dev->real_device.regions;
    int ret, pos;

    /* Clear initial capabilities pointer and status copied from hw */
    pci_set_byte(pci_dev->config + PCI_CAPABILITY_LIST, 0);
    pci_set_word(pci_dev->config + PCI_STATUS,
                 pci_get_word(pci_dev->config + PCI_STATUS) &
                 ~PCI_STATUS_CAP_LIST);

    /* Expose MSI capability
     * MSI capability is the 1st capability in capability config */
    pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_MSI, 0);
    if (pos != 0 && kvm_check_extension(kvm_state, KVM_CAP_ASSIGN_DEV_IRQ)) {
        if (!check_irqchip_in_kernel()) {
            return -ENOTSUP;
        }
        dev->cap.available |= ASSIGNED_DEVICE_CAP_MSI;
        /* Only 32-bit/no-mask currently supported */
        ret = pci_add_capability(pci_dev, PCI_CAP_ID_MSI, pos, 10);
        if (ret < 0) {
            return ret;
        }
        pci_dev->msi_cap = pos;

        /* Start with MSI disabled; keep only the hw's multi-message
         * capability bits visible */
        pci_set_word(pci_dev->config + pos + PCI_MSI_FLAGS,
                     pci_get_word(pci_dev->config + pos + PCI_MSI_FLAGS) &
                     PCI_MSI_FLAGS_QMASK);
        pci_set_long(pci_dev->config + pos + PCI_MSI_ADDRESS_LO, 0);
        pci_set_word(pci_dev->config + pos + PCI_MSI_DATA_32, 0);

        /* Set writable fields */
        pci_set_word(pci_dev->wmask + pos + PCI_MSI_FLAGS,
                     PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE);
        pci_set_long(pci_dev->wmask + pos + PCI_MSI_ADDRESS_LO, 0xfffffffc);
        pci_set_word(pci_dev->wmask + pos + PCI_MSI_DATA_32, 0xffff);
    }
    /* Expose MSI-X capability */
    pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_MSIX, 0);
    if (pos != 0 && kvm_device_msix_supported(kvm_state)) {
        int bar_nr;
        uint32_t msix_table_entry;

        if (!check_irqchip_in_kernel()) {
            return -ENOTSUP;
        }
        dev->cap.available |= ASSIGNED_DEVICE_CAP_MSIX;
        ret = pci_add_capability(pci_dev, PCI_CAP_ID_MSIX, pos, 12);
        if (ret < 0) {
            return ret;
        }
        pci_dev->msix_cap = pos;

        /* Start disabled; expose only the table-size field */
        pci_set_word(pci_dev->config + pos + PCI_MSIX_FLAGS,
                     pci_get_word(pci_dev->config + pos + PCI_MSIX_FLAGS) &
                     PCI_MSIX_FLAGS_QSIZE);

        /* Only enable and function mask bits are writable */
        pci_set_word(pci_dev->wmask + pos + PCI_MSIX_FLAGS,
                     PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);

        /* Locate the physical MSI-X table from BIR + offset, and derive
         * the vector count (QSIZE field is N-1 encoded) */
        msix_table_entry = pci_get_long(pci_dev->config + pos + PCI_MSIX_TABLE);
        bar_nr = msix_table_entry & PCI_MSIX_FLAGS_BIRMASK;
        msix_table_entry &= ~PCI_MSIX_FLAGS_BIRMASK;
        dev->msix_table_addr = pci_region[bar_nr].base_addr + msix_table_entry;
        dev->msix_max = pci_get_word(pci_dev->config + pos + PCI_MSIX_FLAGS);
        dev->msix_max &= PCI_MSIX_FLAGS_QSIZE;
        dev->msix_max += 1;
    }

    /* Minimal PM support, nothing writable, device appears to NAK changes */
    pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_PM, 0);
    if (pos) {
        uint16_t pmc;

        ret = pci_add_capability(pci_dev, PCI_CAP_ID_PM, pos, PCI_PM_SIZEOF);
        if (ret < 0) {
            return ret;
        }

        assigned_dev_setup_cap_read(dev, pos, PCI_PM_SIZEOF);

        pmc = pci_get_word(pci_dev->config + pos + PCI_CAP_FLAGS);
        pmc &= (PCI_PM_CAP_VER_MASK | PCI_PM_CAP_DSI);
        pci_set_word(pci_dev->config + pos + PCI_CAP_FLAGS, pmc);

        /* assign_device will bring the device up to D0, so we don't need
         * to worry about doing that ourselves here. */
        pci_set_word(pci_dev->config + pos + PCI_PM_CTRL,
                     PCI_PM_CTRL_NO_SOFT_RESET);

        pci_set_byte(pci_dev->config + pos + PCI_PM_PPB_EXTENSIONS, 0);
        pci_set_byte(pci_dev->config + pos + PCI_PM_DATA_REGISTER, 0);
    }

    pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_EXP, 0);
    if (pos) {
        uint8_t version, size = 0;
        uint16_t type, devctl, lnksta;
        uint32_t devcap, lnkcap;

        /* Determine capability structure size from its version field */
        version = pci_get_byte(pci_dev->config + pos + PCI_EXP_FLAGS);
        version &= PCI_EXP_FLAGS_VERS;
        if (version == 1) {
            size = 0x14;
        } else if (version == 2) {
            /*
             * Check for non-std size, accept reduced size to 0x34,
             * which is what bcm5761 implemented, violating the
             * PCIe v3.0 spec that regs should exist and be read as 0,
             * not optionally provided and shorten the struct size.
             */
            size = MIN(0x3c, PCI_CONFIG_SPACE_SIZE - pos);
            if (size < 0x34) {
                error_report("%s: Invalid size PCIe cap-id 0x%x",
                             __func__, PCI_CAP_ID_EXP);
                return -EINVAL;
            } else if (size != 0x3c) {
                error_report("WARNING, %s: PCIe cap-id 0x%x has "
                             "non-standard size 0x%x; std size should be 0x3c",
                             __func__, PCI_CAP_ID_EXP, size);
            }
        } else if (version == 0) {
            uint16_t vid, did;
            vid = pci_get_word(pci_dev->config + PCI_VENDOR_ID);
            did = pci_get_word(pci_dev->config + PCI_DEVICE_ID);
            if (vid == PCI_VENDOR_ID_INTEL && did == 0x10ed) {
                /*
                 * quirk for Intel 82599 VF with invalid PCIe capability
                 * version, should really be version 2 (same as PF)
                 */
                size = 0x3c;
            }
        }

        if (size == 0) {
            error_report("%s: Unsupported PCI express capability version %d",
                         __func__, version);
            return -EINVAL;
        }

        ret = pci_add_capability(pci_dev, PCI_CAP_ID_EXP, pos, size);
        if (ret < 0) {
            return ret;
        }

        assigned_dev_setup_cap_read(dev, pos, size);

        /* Only endpoint-type devices can be assigned */
        type = pci_get_word(pci_dev->config + pos + PCI_EXP_FLAGS);
        type = (type & PCI_EXP_FLAGS_TYPE) >> 4;
        if (type != PCI_EXP_TYPE_ENDPOINT &&
            type != PCI_EXP_TYPE_LEG_END && type != PCI_EXP_TYPE_RC_END) {
            error_report("Device assignment only supports endpoint assignment,"
                         " device type %d", type);
            return -EINVAL;
        }

        /* capabilities, pass existing read-only copy
         * PCI_EXP_FLAGS_IRQ: updated by hardware, should be direct read */

        /* device capabilities: hide FLR */
        devcap = pci_get_long(pci_dev->config + pos + PCI_EXP_DEVCAP);
        devcap &= ~PCI_EXP_DEVCAP_FLR;
        pci_set_long(pci_dev->config + pos + PCI_EXP_DEVCAP, devcap);

        /* device control: clear all error reporting enable bits, leaving
         *                 only a few host values.  Note, these are
         *                 all writable, but not passed to hw.
         */
        devctl = pci_get_word(pci_dev->config + pos + PCI_EXP_DEVCTL);
        devctl = (devctl & (PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_PAYLOAD)) |
                  PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
        pci_set_word(pci_dev->config + pos + PCI_EXP_DEVCTL, devctl);
        devctl = PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_AUX_PME;
        pci_set_word(pci_dev->wmask + pos + PCI_EXP_DEVCTL, ~devctl);

        /* Clear device status */
        pci_set_word(pci_dev->config + pos + PCI_EXP_DEVSTA, 0);

        /* Link capabilities, expose links and latencues, clear reporting */
        lnkcap = pci_get_long(pci_dev->config + pos + PCI_EXP_LNKCAP);
        lnkcap &= (PCI_EXP_LNKCAP_SLS | PCI_EXP_LNKCAP_MLW |
                   PCI_EXP_LNKCAP_ASPMS | PCI_EXP_LNKCAP_L0SEL |
                   PCI_EXP_LNKCAP_L1EL);
        pci_set_long(pci_dev->config + pos + PCI_EXP_LNKCAP, lnkcap);

        /* Link control, pass existing read-only copy.  Should be writable? */

        /* Link status, only expose current speed and width */
        lnksta = pci_get_word(pci_dev->config + pos + PCI_EXP_LNKSTA);
        lnksta &= (PCI_EXP_LNKSTA_CLS | PCI_EXP_LNKSTA_NLW);
        pci_set_word(pci_dev->config + pos + PCI_EXP_LNKSTA, lnksta);

        if (version >= 2) {
            /* Slot capabilities, control, status - not needed for endpoints */
            pci_set_long(pci_dev->config + pos + PCI_EXP_SLTCAP, 0);
            pci_set_word(pci_dev->config + pos + PCI_EXP_SLTCTL, 0);
            pci_set_word(pci_dev->config + pos + PCI_EXP_SLTSTA, 0);

            /* Root control, capabilities, status - not needed for endpoints */
            pci_set_word(pci_dev->config + pos + PCI_EXP_RTCTL, 0);
            pci_set_word(pci_dev->config + pos + PCI_EXP_RTCAP, 0);
            pci_set_long(pci_dev->config + pos + PCI_EXP_RTSTA, 0);

            /* Device capabilities/control 2, pass existing read-only copy */
            /* Link control 2, pass existing read-only copy */
        }
    }

    pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_PCIX, 0);
    if (pos) {
        uint16_t cmd;
        uint32_t status;

        /* Only expose the minimum, 8 byte capability */
        ret = pci_add_capability(pci_dev, PCI_CAP_ID_PCIX, pos, 8);
        if (ret < 0) {
            return ret;
        }

        assigned_dev_setup_cap_read(dev, pos, 8);

        /* Command register, clear upper bits, including extended modes */
        cmd = pci_get_word(pci_dev->config + pos + PCI_X_CMD);
        cmd &= (PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO | PCI_X_CMD_MAX_READ |
                PCI_X_CMD_MAX_SPLIT);
        pci_set_word(pci_dev->config + pos + PCI_X_CMD, cmd);

        /* Status register, update with emulated PCI bus location, clear
         * error bits, leave the rest. */
        status = pci_get_long(pci_dev->config + pos + PCI_X_STATUS);
        status &= ~(PCI_X_STATUS_BUS | PCI_X_STATUS_DEVFN);
        status |= (pci_bus_num(pci_dev->bus) << 8) | pci_dev->devfn;
        status &= ~(PCI_X_STATUS_SPL_DISC | PCI_X_STATUS_UNX_SPL |
                    PCI_X_STATUS_SPL_ERR);
        pci_set_long(pci_dev->config + pos + PCI_X_STATUS, status);
    }

    pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_VPD, 0);
    if (pos) {
        /* Direct R/W passthrough */
        ret = pci_add_capability(pci_dev, PCI_CAP_ID_VPD, pos, 8);
        if (ret < 0) {
            return ret;
        }

        assigned_dev_setup_cap_read(dev, pos, 8);

        /* direct write for cap content */
        assigned_dev_direct_config_write(dev, pos + 2, 6);
    }

    /* Devices can have multiple vendor capabilities, get them all */
    for (pos = 0; (pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_VNDR, pos));
        pos += PCI_CAP_LIST_NEXT) {
        uint8_t len = pci_get_byte(pci_dev->config + pos + PCI_CAP_FLAGS);
        /* Direct R/W passthrough */
        ret = pci_add_capability(pci_dev, PCI_CAP_ID_VNDR, pos, len);
        if (ret < 0) {
            return ret;
        }

        assigned_dev_setup_cap_read(dev, pos, len);

        /* direct write for cap content */
        assigned_dev_direct_config_write(dev, pos + 2, len - 2);
    }

    /* If real and virtual capability list status bits differ, virtualize the
     * access. */
    if ((pci_get_word(pci_dev->config + PCI_STATUS) & PCI_STATUS_CAP_LIST) !=
        (assigned_dev_pci_read_byte(pci_dev, PCI_STATUS) &
         PCI_STATUS_CAP_LIST)) {
        dev->emulate_config_read[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
    }

    return 0;
}
1532

    
1533
static uint64_t
1534
assigned_dev_msix_mmio_read(void *opaque, hwaddr addr,
1535
                            unsigned size)
1536
{
1537
    AssignedDevice *adev = opaque;
1538
    uint64_t val;
1539

    
1540
    memcpy(&val, (void *)((uint8_t *)adev->msix_table + addr), size);
1541

    
1542
    return val;
1543
}
1544

    
1545
/*
 * Handle guest writes to the shadow MSI-X table.  While MSI-X is enabled,
 * detect mask/unmask transitions on the touched vector: unmasking either
 * reprograms an existing KVM route with the (possibly changed) message, or
 * rebuilds the whole table for a previously unassigned vector.
 */
static void assigned_dev_msix_mmio_write(void *opaque, hwaddr addr,
                                         uint64_t val, unsigned size)
{
    AssignedDevice *adev = opaque;
    PCIDevice *pdev = &adev->dev;
    uint16_t ctrl;
    MSIXTableEntry orig;
    int i = addr >> 4; /* table entries are 16 bytes each */

    if (i >= adev->msix_max) {
        return; /* Drop write */
    }

    ctrl = pci_get_word(pdev->config + pdev->msix_cap + PCI_MSIX_FLAGS);

    DEBUG("write to MSI-X table offset 0x%lx, val 0x%lx\n", addr, val);

    /* Snapshot the entry to detect a mask-state transition below */
    if (ctrl & PCI_MSIX_FLAGS_ENABLE) {
        orig = adev->msix_table[i];
    }

    memcpy((uint8_t *)adev->msix_table + addr, &val, size);

    if (ctrl & PCI_MSIX_FLAGS_ENABLE) {
        MSIXTableEntry *entry = &adev->msix_table[i];

        if (!assigned_dev_msix_masked(&orig) &&
            assigned_dev_msix_masked(entry)) {
            /*
             * Vector masked, disable it
             *
             * XXX It's not clear if we can or should actually attempt
             * to mask or disable the interrupt.  KVM doesn't have
             * support for pending bits and kvm_assign_set_msix_entry
             * doesn't modify the device hardware mask.  Interrupts
             * while masked are simply not injected to the guest, so
             * are lost.  Can we get away with always injecting an
             * interrupt on unmask?
             */
        } else if (assigned_dev_msix_masked(&orig) &&
                   !assigned_dev_msix_masked(entry)) {
            /* Vector unmasked */
            if (i >= adev->msi_virq_nr || adev->msi_virq[i] < 0) {
                /* Previously unassigned vector, start from scratch */
                assigned_dev_update_msix(pdev);
                return;
            } else {
                /* Update an existing, previously masked vector */
                MSIMessage msg;
                int ret;

                msg.address = entry->addr_lo |
                    ((uint64_t)entry->addr_hi << 32);
                msg.data = entry->data;

                ret = kvm_irqchip_update_msi_route(kvm_state,
                                                   adev->msi_virq[i], msg);
                if (ret) {
                    error_report("Error updating irq routing entry (%d)", ret);
                }
            }
        }
    }
}
1609

    
1610
/* MMIO ops for the emulated MSI-X table region; accesses are handled in
 * 4- or 8-byte units against the shadow table. */
static const MemoryRegionOps assigned_dev_msix_mmio_ops = {
    .read = assigned_dev_msix_mmio_read,
    .write = assigned_dev_msix_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
1623

    
1624
/*
 * Return the shadow MSI-X table to its power-on state: all fields zeroed
 * and every vector's control bit set to masked.
 */
static void assigned_dev_msix_reset(AssignedDevice *dev)
{
    int i;

    if (!dev->msix_table) {
        return;
    }

    memset(dev->msix_table, 0, MSIX_PAGE_SIZE);

    for (i = 0; i < dev->msix_max; i++) {
        dev->msix_table[i].ctrl = cpu_to_le32(0x1); /* Masked */
    }
}
1639

    
1640
/*
 * Allocate the shadow MSI-X table page and expose it to the guest as an
 * MMIO region.
 *
 * Returns 0 on success, -EFAULT if the backing page cannot be mapped.
 */
static int assigned_dev_register_msix_mmio(AssignedDevice *dev)
{
    /*
     * POSIX requires fd == -1 for MAP_ANONYMOUS mappings; passing 0 (as
     * this code previously did) only happens to work on Linux.
     */
    dev->msix_table = mmap(NULL, MSIX_PAGE_SIZE, PROT_READ|PROT_WRITE,
                           MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    if (dev->msix_table == MAP_FAILED) {
        error_report("fail allocate msix_table! %s", strerror(errno));
        /*
         * Reset to NULL so cleanup paths that test dev->msix_table don't
         * mistake MAP_FAILED for a valid mapping and munmap() garbage.
         */
        dev->msix_table = NULL;
        return -EFAULT;
    }

    /* Start with all vectors zeroed and masked. */
    assigned_dev_msix_reset(dev);

    memory_region_init_io(&dev->mmio, OBJECT(dev), &assigned_dev_msix_mmio_ops,
                          dev, "assigned-dev-msix", MSIX_PAGE_SIZE);
    return 0;
}
1655

    
1656
/*
 * Tear down the MSI-X MMIO region and release the shadow table page.
 * Safe to call when the table was never registered.
 */
static void assigned_dev_unregister_msix_mmio(AssignedDevice *dev)
{
    if (dev->msix_table == NULL) {
        return;
    }

    memory_region_destroy(&dev->mmio);

    if (munmap(dev->msix_table, MSIX_PAGE_SIZE) < 0) {
        error_report("error unmapping msix_table! %s", strerror(errno));
    }
    dev->msix_table = NULL;
}
1669

    
1670
/*
 * Assigned devices depend on host-side state that cannot be serialized,
 * so the device is marked unmigratable; migration is blocked while one
 * is present.
 */
static const VMStateDescription vmstate_assigned_device = {
    .name = "pci-assign",
    .unmigratable = 1,
};
1674

    
1675
/*
 * qdev reset handler: bring the physical device to a known state.
 * First MSI/MSI-X is disabled, then a function reset is requested via
 * pci-sysfs, and finally bus mastering is cleared to stop in-flight DMA.
 */
static void reset_assigned_device(DeviceState *dev)
{
    PCIDevice *pci_dev = DO_UPCAST(PCIDevice, qdev, dev);
    AssignedDevice *adev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    char reset_file[64];
    const char reset[] = "1";
    int fd, ret;

    /*
     * If a guest is reset without being shutdown, MSI/MSI-X can still
     * be running.  We want to return the device to a known state on
     * reset, so disable those here.  We especially do not want MSI-X
     * enabled since it lives in MMIO space, which is about to get
     * disabled.
     */
    switch (adev->assigned_irq_type) {
    case ASSIGNED_IRQ_MSIX: {
        uint16_t ctrl = pci_get_word(pci_dev->config +
                                     pci_dev->msix_cap + PCI_MSIX_FLAGS);

        pci_set_word(pci_dev->config + pci_dev->msix_cap + PCI_MSIX_FLAGS,
                     ctrl & ~PCI_MSIX_FLAGS_ENABLE);
        assigned_dev_update_msix(pci_dev);
        break;
    }
    case ASSIGNED_IRQ_MSI: {
        uint8_t ctrl = pci_get_byte(pci_dev->config +
                                    pci_dev->msi_cap + PCI_MSI_FLAGS);

        pci_set_byte(pci_dev->config + pci_dev->msi_cap + PCI_MSI_FLAGS,
                     ctrl & ~PCI_MSI_FLAGS_ENABLE);
        assigned_dev_update_msi(pci_dev);
        break;
    }
    default:
        break;
    }

    snprintf(reset_file, sizeof(reset_file),
             "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/reset",
             adev->host.domain, adev->host.bus, adev->host.slot,
             adev->host.function);

    /*
     * Issue a device reset via pci-sysfs.  Note that we use write(2) here
     * and ignore the return value because some kernels have a bug that
     * returns 0 rather than bytes written on success, sending us into an
     * infinite retry loop using other write mechanisms.
     */
    fd = open(reset_file, O_WRONLY);
    if (fd >= 0) {
        ret = write(fd, reset, strlen(reset));
        (void)ret;
        close(fd);
    }

    /*
     * When a 0 is written to the bus master register, the device is logically
     * disconnected from the PCI bus. This avoids further DMA transfers.
     */
    assigned_dev_pci_write_config(pci_dev, PCI_COMMAND, 0, 1);
}
1730

    
1731
/*
 * PCIDeviceClass::init hook for kvm-pci-assign.
 *
 * Validates the configuration, sets up config-space access control,
 * opens the host device via sysfs, registers its capabilities, BARs and
 * (if present) the MSI-X table MMIO, then assigns the device and its
 * legacy INTx to the guest via KVM.
 *
 * Returns 0 on success, -1 on any failure (resources acquired so far are
 * released through the goto cleanup chain).  The order of the steps
 * below matters: each later step depends on state set up by earlier ones.
 */
static int assigned_initfn(struct PCIDevice *pci_dev)
{
    AssignedDevice *dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    uint8_t e_intx;
    int r;

    /* Classic device assignment only works through KVM. */
    if (!kvm_enabled()) {
        error_report("pci-assign: error: requires KVM support");
        return -1;
    }

    /* 0000:00:00.0 doubles as "no host= option given". */
    if (!dev->host.domain && !dev->host.bus && !dev->host.slot &&
        !dev->host.function) {
        error_report("pci-assign: error: no host device specified");
        return -1;
    }

    /*
     * Set up basic config space access control. Will be further refined during
     * device initialization.
     */
    assigned_dev_emulate_config_read(dev, 0, PCI_CONFIG_SPACE_SIZE);
    assigned_dev_direct_config_read(dev, PCI_STATUS, 2);
    assigned_dev_direct_config_read(dev, PCI_REVISION_ID, 1);
    assigned_dev_direct_config_read(dev, PCI_CLASS_PROG, 3);
    assigned_dev_direct_config_read(dev, PCI_CACHE_LINE_SIZE, 1);
    assigned_dev_direct_config_read(dev, PCI_LATENCY_TIMER, 1);
    assigned_dev_direct_config_read(dev, PCI_BIST, 1);
    assigned_dev_direct_config_read(dev, PCI_CARDBUS_CIS, 4);
    assigned_dev_direct_config_read(dev, PCI_SUBSYSTEM_VENDOR_ID, 2);
    assigned_dev_direct_config_read(dev, PCI_SUBSYSTEM_ID, 2);
    assigned_dev_direct_config_read(dev, PCI_CAPABILITY_LIST + 1, 7);
    assigned_dev_direct_config_read(dev, PCI_MIN_GNT, 1);
    assigned_dev_direct_config_read(dev, PCI_MAX_LAT, 1);
    /* Writes start with the same emulated/direct split as reads. */
    memcpy(dev->emulate_config_write, dev->emulate_config_read,
           sizeof(dev->emulate_config_read));

    if (get_real_device(dev)) {
        error_report("pci-assign: Error: Couldn't get real device (%s)!",
                     dev->dev.qdev.id);
        goto out;
    }

    if (assigned_device_pci_cap_init(pci_dev) < 0) {
        goto out;
    }

    /* intercept MSI-X entry page in the MMIO */
    if (dev->cap.available & ASSIGNED_DEVICE_CAP_MSIX) {
        if (assigned_dev_register_msix_mmio(dev)) {
            goto out;
        }
    }

    /* handle real device's MMIO/PIO BARs */
    if (assigned_dev_register_regions(dev->real_device.regions,
                                      dev->real_device.region_number,
                                      dev)) {
        goto out;
    }

    /* handle interrupt routing */
    e_intx = dev->dev.config[PCI_INTERRUPT_PIN] - 1;
    dev->intpin = e_intx;
    dev->intx_route.mode = PCI_INTX_DISABLED;
    dev->intx_route.irq = -1;

    /* assign device to guest */
    r = assign_device(dev);
    if (r < 0) {
        goto out;
    }

    /* assign legacy INTx to the device */
    r = assign_intx(dev);
    if (r < 0) {
        goto assigned_out;
    }

    assigned_dev_load_option_rom(dev);

    add_boot_device_path(dev->bootindex, &pci_dev->qdev, NULL);

    return 0;

assigned_out:
    deassign_device(dev);
out:
    free_assigned_device(dev);
    return -1;
}
1822

    
1823
static void assigned_exitfn(struct PCIDevice *pci_dev)
1824
{
1825
    AssignedDevice *dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
1826

    
1827
    deassign_device(dev);
1828
    free_assigned_device(dev);
1829
}
1830

    
1831
/*
 * User-settable qdev properties:
 *   host       - host PCI address of the device to assign
 *   prefer_msi - feature bit, defaults to off
 *   share_intx - feature bit, defaults to on
 *   bootindex  - boot ordering; -1 means unspecified
 *   configfd   - string naming a config-space fd (presumably a
 *                pre-opened descriptor passed in by a management tool —
 *                confirm against assigned_dev_getfd/get_real_device)
 */
static Property assigned_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", AssignedDevice, host),
    DEFINE_PROP_BIT("prefer_msi", AssignedDevice, features,
                    ASSIGNED_DEVICE_PREFER_MSI_BIT, false),
    DEFINE_PROP_BIT("share_intx", AssignedDevice, features,
                    ASSIGNED_DEVICE_SHARE_INTX_BIT, true),
    DEFINE_PROP_INT32("bootindex", AssignedDevice, bootindex, -1),
    DEFINE_PROP_STRING("configfd", AssignedDevice, configfd_name),
    DEFINE_PROP_END_OF_LIST(),
};
1841

    
1842
/* QOM class_init: wire up the PCI and generic device hooks for
 * the kvm-pci-assign type. */
static void assign_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass);

    /* PCI-level callbacks: lifecycle and config-space interception. */
    pc->init         = assigned_initfn;
    pc->exit         = assigned_exitfn;
    pc->config_read  = assigned_dev_pci_read_config;
    pc->config_write = assigned_dev_pci_write_config;

    /* Generic device metadata and handlers. */
    dc->desc        = "KVM-based PCI passthrough";
    dc->props       = assigned_dev_properties;
    dc->vmsd        = &vmstate_assigned_device;
    dc->reset       = reset_assigned_device;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}
1857

    
1858
/* QOM type description for the "kvm-pci-assign" device, derived from
 * the generic PCI device type. */
static const TypeInfo assign_info = {
    .name               = "kvm-pci-assign",
    .parent             = TYPE_PCI_DEVICE,
    .instance_size      = sizeof(AssignedDevice),
    .class_init         = assign_class_init,
};
1864

    
1865
/* Register the kvm-pci-assign type with the QOM type system. */
static void assign_register_types(void)
{
    type_register_static(&assign_info);
}

/* Run the registration at module-init time. */
type_init(assign_register_types)
1871

    
1872
/*
 * Scan the assigned devices for the devices that have an option ROM, and then
 * load the corresponding ROM data to RAM. If an error occurs while loading an
 * option ROM, we just ignore that option ROM and continue with the next one.
 */
static void assigned_dev_load_option_rom(AssignedDevice *dev)
{
    char name[32], rom_file[64];
    FILE *fp;
    uint8_t val;
    struct stat st;
    void *ptr;

    /* If loading ROM from file, pci handles it */
    if (dev->dev.romfile || !dev->dev.rom_bar) {
        return;
    }

    snprintf(rom_file, sizeof(rom_file),
             "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/rom",
             dev->host.domain, dev->host.bus, dev->host.slot,
             dev->host.function);

    /* No "rom" attribute in sysfs: device exposes no option ROM. */
    if (stat(rom_file, &st)) {
        return;
    }

    /*
     * NOTE(review): access(F_OK) only checks existence using the real
     * UID/GID; after a successful stat() it can only fail for a setuid
     * process.  R_OK|W_OK looks like the intended check — confirm.
     */
    if (access(rom_file, F_OK)) {
        error_report("pci-assign: Insufficient privileges for %s", rom_file);
        return;
    }

    /* Write "1" to the ROM file to enable it */
    fp = fopen(rom_file, "r+");
    if (fp == NULL) {
        return;
    }
    val = 1;
    if (fwrite(&val, 1, 1, fp) != 1) {
        goto close_rom;
    }
    /* Rewind so the subsequent fread starts at the ROM's first byte. */
    fseek(fp, 0, SEEK_SET);

    /* Back the ROM BAR with RAM sized from the sysfs file. */
    snprintf(name, sizeof(name), "%s.rom",
            object_get_typename(OBJECT(dev)));
    memory_region_init_ram(&dev->dev.rom, OBJECT(dev), name, st.st_size);
    vmstate_register_ram(&dev->dev.rom, &dev->dev.qdev);
    ptr = memory_region_get_ram_ptr(&dev->dev.rom);
    /* 0xff fill mimics unprogrammed ROM in case of a short read. */
    memset(ptr, 0xff, st.st_size);

    if (!fread(ptr, 1, st.st_size, fp)) {
        error_report("pci-assign: Cannot read from host %s", rom_file);
        error_printf("Device option ROM contents are probably invalid "
                     "(check dmesg).\nSkip option ROM probe with rombar=0, "
                     "or load from file with romfile=\n");
        memory_region_destroy(&dev->dev.rom);
        goto close_rom;
    }

    pci_register_bar(&dev->dev, PCI_ROM_SLOT, 0, &dev->dev.rom);
    dev->dev.has_rom = true;
close_rom:
    /* Write "0" to disable ROM */
    fseek(fp, 0, SEEK_SET);
    val = 0;
    if (!fwrite(&val, 1, 1, fp)) {
        DEBUG("%s\n", "Failed to disable pci-sysfs rom file");
    }
    fclose(fp);
}