hw/virtio/virtio-pci.c @ 881d588a
/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <inttypes.h>

#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-blk.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/virtio-serial.h"
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/virtio-balloon.h"
#include "hw/pci/pci.h"
#include "qemu/error-report.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
#include "sysemu/kvm.h"
#include "sysemu/blockdev.h"
#include "virtio-pci.h"
#include "qemu/range.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/visitor.h"

/* from Linux's linux/virtio_pci.h */

/* A 32-bit r/o bitmask of the features supported by the host */
#define VIRTIO_PCI_HOST_FEATURES        0

/* A 32-bit r/w bitmask of features activated by the guest */
#define VIRTIO_PCI_GUEST_FEATURES       4

/* A 32-bit r/w PFN for the currently selected queue */
#define VIRTIO_PCI_QUEUE_PFN            8

/* A 16-bit r/o queue size for the currently selected queue */
#define VIRTIO_PCI_QUEUE_NUM            12

/* A 16-bit r/w queue selector */
#define VIRTIO_PCI_QUEUE_SEL            14

/* A 16-bit r/w queue notifier */
#define VIRTIO_PCI_QUEUE_NOTIFY         16

/* An 8-bit device status register.  */
#define VIRTIO_PCI_STATUS               18

/* An 8-bit r/o interrupt status register.  Reading the value will return the
 * current contents of the ISR and will also clear it.  This is effectively
 * a read-and-acknowledge. */
#define VIRTIO_PCI_ISR                  19

/* MSI-X registers: only enabled if MSI-X is enabled. */
/* A 16-bit vector for configuration changes. */
#define VIRTIO_MSI_CONFIG_VECTOR        20
/* A 16-bit vector for selected queue notifications. */
#define VIRTIO_MSI_QUEUE_VECTOR         22

/* Config space size */
#define VIRTIO_PCI_CONFIG_NOMSI         20
#define VIRTIO_PCI_CONFIG_MSI           24
#define VIRTIO_PCI_REGION_SIZE(dev)     (msix_present(dev) ? \
                                         VIRTIO_PCI_CONFIG_MSI : \
                                         VIRTIO_PCI_CONFIG_NOMSI)

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG(dev)          (msix_enabled(dev) ? \
                                         VIRTIO_PCI_CONFIG_MSI : \
                                         VIRTIO_PCI_CONFIG_NOMSI)

/* How many bits to shift physical queue address written to QUEUE_PFN.
 * 12 is historical, and due to x86 page size. */
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT    12

/* Flags track per-device state like workarounds for quirks in older guests. */
#define VIRTIO_PCI_FLAG_BUS_MASTER_BUG  (1 << 0)

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 */
#define wmb() do { } while (0)

/* HACK for virtio to determine if it's running a big endian guest */
bool virtio_is_big_endian(void);

/* virtio device */
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);
    if (msix_enabled(&proxy->pci_dev))
        msix_notify(&proxy->pci_dev, vector);
    else
        qemu_set_irq(proxy->pci_dev.irq[0], proxy->vdev->isr & 1);
}

static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, proxy->vdev->config_vector);
}

static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, virtio_queue_vector(proxy->vdev, n));
}

static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    int ret;
    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &proxy->vdev->config_vector);
    } else {
        proxy->vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (proxy->vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, proxy->vdev->config_vector);
    }
    return 0;
}

static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    uint16_t vector;
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(proxy->vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }
    return 0;
}

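/* Host notifiers: an ioeventfd is registered on the QUEUE_NOTIFY offset of the
 * legacy I/O BAR, so a guest "kick" is signalled straight to the backend (or
 * to KVM) instead of being emulated in the vcpu thread's PIO path.
 */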
static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
                                                 int n, bool assign, bool set_handler)
{
    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    int r = 0;

    if (assign) {
        r = event_notifier_init(notifier, 1);
        if (r < 0) {
            error_report("%s: unable to init event notifier: %d",
                         __func__, r);
            return r;
        }
        virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
        memory_region_add_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
                                  true, n, notifier);
    } else {
        memory_region_del_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
                                  true, n, notifier);
        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
        event_notifier_cleanup(notifier);
    }
    return r;
}

static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    int n, r;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||
        proxy->ioeventfd_disabled ||
        proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(proxy->vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, true, true);
        if (r < 0) {
            goto assign_error;
        }
    }
    proxy->ioeventfd_started = true;
    return;

assign_error:
    while (--n >= 0) {
        if (!virtio_queue_get_num(proxy->vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
    error_report("%s: failed. Fallback to a userspace (slower).", __func__);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    int r;
    int n;

    if (!proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(proxy->vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
}

static void virtio_pci_reset(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    virtio_pci_stop_ioeventfd(proxy);
    virtio_reset(proxy->vdev);
    msix_unuse_all_vectors(&proxy->pci_dev);
    proxy->flags &= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
}

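/* Writes to the legacy virtio header (everything below the device-specific
 * config area): feature negotiation, queue PFN/selector/notify, device status
 * and MSI-X vector assignment all funnel through here.
 */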
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = proxy->vdev;
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly?  We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = vdev->bad_features ? vdev->bad_features(vdev) : 0;
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_stop_ioeventfd(proxy);
            virtio_reset(proxy->vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        }
        else
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_PCI_QUEUE_MAX)
            vdev->queue_sel = val;
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_PCI_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_reset(proxy->vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        }

        /* Linux before 2.6.34 sets the device as OK without enabling
           the PCI device bus master bit. In this case we need to disable
           some safety checks. */
        if ((val & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for the guest to discover that an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for the guest to discover that an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        error_report("%s: unexpected address 0x%x value 0x%x",
                     __func__, addr, val);
        break;
    }
}

static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = proxy->vdev;
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = proxy->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = vdev->isr;
        vdev->isr = 0;
        qemu_set_irq(proxy->pci_dev.irq[0], 0);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}

static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    uint64_t val = 0;
    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(proxy->vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(proxy->vdev, addr);
        if (virtio_is_big_endian()) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(proxy->vdev, addr);
        if (virtio_is_big_endian()) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}

static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(proxy->vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian()) {
            val = bswap16(val);
        }
        virtio_config_writew(proxy->vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian()) {
            val = bswap32(val);
        }
        virtio_config_writel(proxy->vdev, addr, val);
        break;
    }
}

static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

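/* PCI config space writes: clearing the bus master bit stops the ioeventfd and
 * drops DRIVER_OK, except for pre-2.6.34 Linux guests that never set bus
 * master in the first place (tracked via VIRTIO_PCI_FLAG_BUS_MASTER_BUG).
 */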
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    pci_default_write_config(pci_dev, address, val, len);

    if (range_covers_byte(address, len, PCI_COMMAND) &&
        !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER) &&
        !(proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG)) {
        virtio_pci_stop_ioeventfd(proxy);
        virtio_set_status(proxy->vdev,
                          proxy->vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
    }
}

static unsigned virtio_pci_get_features(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return proxy->host_features;
}

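/* Guest notifiers via KVM irqfd: each MSI-X vector gets a KVM MSI route,
 * reference-counted in proxy->vector_irqfd so queues sharing a vector reuse
 * the same route; the per-queue guest notifier eventfd is then attached to
 * that route so interrupts are injected without exiting to QEMU.
 */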
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector,
                                        MSIMessage msg)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        ret = kvm_irqchip_add_msi_route(kvm_state, msg);
        if (ret < 0) {
            return ret;
        }
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}

static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}

static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                 unsigned int queue_no,
                                 unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    int ret;
    ret = kvm_irqchip_add_irqfd_notifier(kvm_state, n, irqfd->virq);
    return ret;
}

static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                      unsigned int queue_no,
                                      unsigned int vector)
{
    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}

static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = proxy->vdev;
    unsigned int vector;
    int ret, queue_no;
    MSIMessage msg;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        msg = msix_get_message(dev, vector);
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector, msg);
        if (ret < 0) {
            goto undo;
        }
        /* If guest supports masking, set up irqfd now.
         * Otherwise, delay until unmasked in the frontend.
         */
        if (proxy->vdev->guest_notifier_mask) {
            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
            if (ret < 0) {
                kvm_virtio_pci_vq_vector_release(proxy, vector);
                goto undo;
            }
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        if (proxy->vdev->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
    return ret;
}

static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = proxy->vdev;
    unsigned int vector;
    int queue_no;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        /* If guest supports masking, clean up irqfd now.
         * Otherwise, it was cleaned when masked in the frontend.
         */
        if (proxy->vdev->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
}

static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       MSIMessage msg)
{
    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg);
            if (ret < 0) {
                return ret;
            }
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (proxy->vdev->guest_notifier_mask) {
        proxy->vdev->guest_notifier_mask(proxy->vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (proxy->vdev->guest_notifier_pending &&
            proxy->vdev->guest_notifier_pending(proxy->vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
    }
    return ret;
}

static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                             unsigned int queue_no,
                                             unsigned int vector)
{
    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
     */
    if (proxy->vdev->guest_notifier_mask) {
        proxy->vdev->guest_notifier_mask(proxy->vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
    }
}

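/* MSI-X mask/unmask/poll notifiers registered with the PCI layer: they walk
 * every queue using the given vector and apply the per-queue helpers above;
 * the poll callback repopulates the MSI-X pending bit for vectors whose
 * interrupts were suppressed while masked.
 */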
static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = proxy->vdev;
    int ret, queue_no;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        if (virtio_queue_vector(vdev, queue_no) != vector) {
            continue;
        }
        ret = virtio_pci_vq_vector_unmask(proxy, queue_no, vector, msg);
        if (ret < 0) {
            goto undo;
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        if (virtio_queue_vector(vdev, queue_no) != vector) {
            continue;
        }
        virtio_pci_vq_vector_mask(proxy, queue_no, vector);
    }
    return ret;
}

static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = proxy->vdev;
    int queue_no;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        if (virtio_queue_vector(vdev, queue_no) != vector) {
            continue;
        }
        virtio_pci_vq_vector_mask(proxy, queue_no, vector);
    }
}

static void virtio_pci_vector_poll(PCIDevice *dev,
                                   unsigned int vector_start,
                                   unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = proxy->vdev;
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        if (vdev->guest_notifier_pending) {
            if (vdev->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}

static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    return 0;
}

static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return msix_enabled(&proxy->pci_dev);
}

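/* (De)assign guest notifiers for the first nvqs queues.  When KVM can deliver
 * MSI via irqfd, the vector_irqfd table is allocated and the MSI-X vector
 * notifiers above are hooked up; on failure everything set up so far is torn
 * down again in reverse order.
 */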
static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = proxy->vdev;
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_PCI_QUEUE_MAX);

    /* When deassigning, pass a consistent nvqs value
     * to avoid leaking notifiers.
     */
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd || vdev->guest_notifier_mask) && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_release(proxy, nvqs);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign,
                                          kvm_msi_via_irqfd_enabled());
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd || vdev->guest_notifier_mask) && assign) {
        if (with_irqfd) {
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_use(proxy, nvqs);
            if (r < 0) {
                goto assign_error;
            }
        }
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        assert(assign);
        kvm_virtio_pci_vector_release(proxy, nvqs);
    }

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    return r;
}

static int virtio_pci_set_host_notifier(DeviceState *d, int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    /* Stop using ioeventfd for virtqueue kick if the device starts using host
     * notifiers.  This makes it easy to avoid stepping on each other's toes.
     */
    proxy->ioeventfd_disabled = assign;
    if (assign) {
        virtio_pci_stop_ioeventfd(proxy);
    }
    /* We don't need to start here: it's not needed because backend
     * currently only stops on status change away from ok,
     * reset, vmstop and such. If we do add code to start here,
     * need to check vmstate, device state etc. */
    return virtio_pci_set_host_notifier_internal(proxy, n, assign, false);
}

static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    if (running) {
        /* Try to find out if the guest has bus master disabled, but is
           in ready state. Then we have a buggy guest OS. */
        if ((proxy->vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}

static const VirtIOBindings virtio_pci_bindings = {
    .notify = virtio_pci_notify,
    .save_config = virtio_pci_save_config,
    .load_config = virtio_pci_load_config,
    .save_queue = virtio_pci_save_queue,
    .load_queue = virtio_pci_load_queue,
    .get_features = virtio_pci_get_features,
    .query_guest_notifiers = virtio_pci_query_guest_notifiers,
    .set_host_notifier = virtio_pci_set_host_notifier,
    .set_guest_notifiers = virtio_pci_set_guest_notifiers,
    .vmstate_change = virtio_pci_vmstate_change,
};

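/* Legacy binding entry point: fills in the PCI config header (subsystem IDs,
 * interrupt pin), sets up the MSI-X BAR and the legacy I/O BAR backed by
 * virtio_pci_config_ops, and advertises the transport feature bits.
 */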
void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev)
{
    uint8_t *config;
    uint32_t size;

    proxy->vdev = vdev;

    config = proxy->pci_dev.config;

    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }
    pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
                 pci_get_word(config + PCI_VENDOR_ID));
    pci_set_word(config + PCI_SUBSYSTEM_ID, vdev->device_id);
    config[PCI_INTERRUPT_PIN] = 1;

    if (vdev->nvectors &&
        msix_init_exclusive_bar(&proxy->pci_dev, vdev->nvectors, 1)) {
        vdev->nvectors = 0;
    }

    proxy->pci_dev.config_write = virtio_write_config;

    size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev) + vdev->config_len;
    if (size & (size-1))
        size = 1 << qemu_fls(size);

    memory_region_init_io(&proxy->bar, &virtio_pci_config_ops, proxy,
                          "virtio-pci", size);
    pci_register_bar(&proxy->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
                     &proxy->bar);

    if (!kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    virtio_bind_device(vdev, &virtio_pci_bindings, DEVICE(proxy));
    proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
    proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
    proxy->host_features = vdev->get_features(vdev, proxy->host_features);
}

static void virtio_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    memory_region_destroy(&proxy->bar);
    msix_uninit_exclusive_bar(pci_dev);
}

static int virtio_rng_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    if (proxy->rng.rng == NULL) {
        proxy->rng.default_backend = RNG_RANDOM(object_new(TYPE_RNG_RANDOM));

        object_property_add_child(OBJECT(pci_dev),
                                  "default-backend",
                                  OBJECT(proxy->rng.default_backend),
                                  NULL);

        object_property_set_link(OBJECT(pci_dev),
                                 OBJECT(proxy->rng.default_backend),
                                 "rng", NULL);
    }

    vdev = virtio_rng_init(&pci_dev->qdev, &proxy->rng);
    if (!vdev) {
        return -1;
    }
    virtio_init_pci(proxy, vdev);
    return 0;
}

static void virtio_rng_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_rng_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}

static void virtio_rng_initfn(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    object_property_add_link(obj, "rng", TYPE_RNG_BACKEND,
                             (Object **)&proxy->rng.rng, NULL);
}

static Property virtio_rng_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
    /* Set a default rate limit of 2^47 bytes per minute or roughly 2TB/s.  If
       you have an entropy source capable of generating more entropy than this
       and you can pass it through via virtio-rng, then hats off to you.  Until
       then, this is unlimited for all practical purposes.
    */
    DEFINE_PROP_UINT64("max-bytes", VirtIOPCIProxy, rng.max_bytes, INT64_MAX),
    DEFINE_PROP_UINT32("period", VirtIOPCIProxy, rng.period_ms, 1 << 16),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_rng_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_rng_init_pci;
    k->exit = virtio_rng_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_RNG;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_rng_properties;
}

static const TypeInfo virtio_rng_info = {
    .name          = "virtio-rng-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .instance_init = virtio_rng_initfn,
    .class_init    = virtio_rng_class_init,
};

#ifdef CONFIG_VIRTFS
static int virtio_9p_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    vdev = virtio_9p_init(&pci_dev->qdev, &proxy->fsconf);
    vdev->nvectors = proxy->nvectors;
    virtio_init_pci(proxy, vdev);
    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;
    return 0;
}

static Property virtio_9p_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_PROP_STRING("mount_tag", VirtIOPCIProxy, fsconf.tag),
    DEFINE_PROP_STRING("fsdev", VirtIOPCIProxy, fsconf.fsdev_id),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_9p_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_9p_init_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_9P;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = 0x2;
    dc->props = virtio_9p_properties;
    dc->reset = virtio_pci_reset;
}

static const TypeInfo virtio_9p_info = {
    .name          = "virtio-9p-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_9p_class_init,
};
#endif

/*
 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
 */

/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    uint8_t *config;
    uint32_t size;

    proxy->vdev = bus->vdev;

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }
    pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
                 pci_get_word(config + PCI_VENDOR_ID));
    pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
    config[PCI_INTERRUPT_PIN] = 1;

    if (proxy->nvectors &&
        msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1)) {
        proxy->nvectors = 0;
    }

    proxy->pci_dev.config_write = virtio_write_config;

    size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
         + virtio_bus_get_vdev_config_len(bus);
    if (size & (size - 1)) {
        size = 1 << qemu_fls(size);
    }

    memory_region_init_io(&proxy->bar, &virtio_pci_config_ops, proxy,
                          "virtio-pci", size);
    pci_register_bar(&proxy->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
                     &proxy->bar);

    if (!kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
    proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
    proxy->host_features = virtio_bus_get_vdev_features(bus,
                                                      proxy->host_features);
}

static int virtio_pci_init(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *dev = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
    virtio_pci_bus_new(&dev->bus, dev);
    if (k->init != NULL) {
        return k->init(dev);
    }
    return 0;
}

static void virtio_pci_exit(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    virtio_pci_stop_ioeventfd(proxy);
    virtio_exit_pci(pci_dev);
}

/*
 * This will be renamed virtio_pci_reset at the end of the series.
 * virtio_pci_reset is still in use at this moment.
 */
static void virtio_pci_rst(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    virtio_pci_stop_ioeventfd(proxy);
    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);
    proxy->flags &= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
}

static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_pci_init;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    dc->reset = virtio_pci_rst;
}

static const TypeInfo virtio_pci_info = {
    .name          = TYPE_VIRTIO_PCI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_pci_class_init,
    .class_size    = sizeof(VirtioPCIClass),
    .abstract      = true,
};

/* virtio-blk-pci */

static Property virtio_blk_pci_properties[] = {
    DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    DEFINE_PROP_BIT("x-data-plane", VirtIOBlkPCI, blk.data_plane, 0, false),
#endif
    DEFINE_VIRTIO_BLK_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_VIRTIO_BLK_PROPERTIES(VirtIOBlkPCI, blk),
    DEFINE_PROP_END_OF_LIST(),
};

static int virtio_blk_pci_init(VirtIOPCIProxy *vpci_dev)
{
    VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    virtio_blk_set_conf(vdev, &(dev->blk));
    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    if (qdev_init(vdev) < 0) {
        return -1;
    }
    return 0;
}

static void virtio_blk_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    dc->props = virtio_blk_pci_properties;
    k->init = virtio_blk_pci_init;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void virtio_blk_pci_instance_init(Object *obj)
{
    VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(obj);
    object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_BLK);
    object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
}

static const TypeInfo virtio_blk_pci_info = {
    .name          = TYPE_VIRTIO_BLK_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOBlkPCI),
    .instance_init = virtio_blk_pci_instance_init,
    .class_init    = virtio_blk_pci_class_init,
};

/* virtio-scsi-pci */

static Property virtio_scsi_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                       DEV_NVECTORS_UNSPECIFIED),
    DEFINE_VIRTIO_SCSI_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_VIRTIO_SCSI_PROPERTIES(VirtIOSCSIPCI, vdev.conf),
    DEFINE_PROP_END_OF_LIST(),
};

static int virtio_scsi_pci_init_pci(VirtIOPCIProxy *vpci_dev)
{
    VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = dev->vdev.conf.num_queues + 3;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    if (qdev_init(vdev) < 0) {
        return -1;
    }
    return 0;
}

static void virtio_scsi_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    k->init = virtio_scsi_pci_init_pci;
    dc->props = virtio_scsi_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    pcidev_k->revision = 0x00;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void virtio_scsi_pci_instance_init(Object *obj)
{
    VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(obj);
    object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_SCSI);
    object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
}

static const TypeInfo virtio_scsi_pci_info = {
    .name          = TYPE_VIRTIO_SCSI_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOSCSIPCI),
    .instance_init = virtio_scsi_pci_instance_init,
    .class_init    = virtio_scsi_pci_class_init,
};

/* virtio-balloon-pci */

static void balloon_pci_stats_get_all(Object *obj, struct Visitor *v,
                                      void *opaque, const char *name,
                                      Error **errp)
{
    VirtIOBalloonPCI *dev = opaque;
    object_property_get(OBJECT(&dev->vdev), v, "guest-stats", errp);
}

static void balloon_pci_stats_get_poll_interval(Object *obj, struct Visitor *v,
                                                void *opaque, const char *name,
                                                Error **errp)
{
    VirtIOBalloonPCI *dev = opaque;
    object_property_get(OBJECT(&dev->vdev), v, "guest-stats-polling-interval",
                        errp);
}

static void balloon_pci_stats_set_poll_interval(Object *obj, struct Visitor *v,
                                                void *opaque, const char *name,
                                                Error **errp)
{
    VirtIOBalloonPCI *dev = opaque;
    object_property_set(OBJECT(&dev->vdev), v, "guest-stats-polling-interval",
                        errp);
}

static Property virtio_balloon_pci_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static int virtio_balloon_pci_init(VirtIOPCIProxy *vpci_dev)
{
    VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    if (vpci_dev->class_code != PCI_CLASS_OTHERS &&
        vpci_dev->class_code != PCI_CLASS_MEMORY_RAM) { /* qemu < 1.1 */
        vpci_dev->class_code = PCI_CLASS_OTHERS;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    if (qdev_init(vdev) < 0) {
        return -1;
    }
    return 0;
}

static void virtio_balloon_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    k->init = virtio_balloon_pci_init;
    dc->props = virtio_balloon_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_OTHERS;
}

static void virtio_balloon_pci_instance_init(Object *obj)
{
    VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(obj);
    object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_BALLOON);
    object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);

    object_property_add(obj, "guest-stats", "guest statistics",
                        balloon_pci_stats_get_all, NULL, NULL, dev,
                        NULL);

    object_property_add(obj, "guest-stats-polling-interval", "int",
                        balloon_pci_stats_get_poll_interval,
                        balloon_pci_stats_set_poll_interval,
                        NULL, dev, NULL);
}

static const TypeInfo virtio_balloon_pci_info = {
    .name          = TYPE_VIRTIO_BALLOON_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOBalloonPCI),
    .instance_init = virtio_balloon_pci_instance_init,
    .class_init    = virtio_balloon_pci_class_init,
};

/* virtio-serial-pci */

static int virtio_serial_pci_init(VirtIOPCIProxy *vpci_dev)
{
    VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    if (vpci_dev->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
        vpci_dev->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */
        vpci_dev->class_code != PCI_CLASS_OTHERS) {        /* qemu-kvm  */
            vpci_dev->class_code = PCI_CLASS_COMMUNICATION_OTHER;
    }

    /* backwards-compatibility with machines that were created with
       DEV_NVECTORS_UNSPECIFIED */
    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = dev->vdev.serial.max_virtserial_ports + 1;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    if (qdev_init(vdev) < 0) {
        return -1;
    }
    return 0;
}

static Property virtio_serial_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_VIRTIO_SERIAL_PROPERTIES(VirtIOSerialPCI, vdev.serial),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_serial_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    k->init = virtio_serial_pci_init;
    dc->props = virtio_serial_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
}

static void virtio_serial_pci_instance_init(Object *obj)
{
    VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(obj);
    object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_SERIAL);
    object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
}

static const TypeInfo virtio_serial_pci_info = {
    .name          = TYPE_VIRTIO_SERIAL_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOSerialPCI),
    .instance_init = virtio_serial_pci_instance_init,
    .class_init    = virtio_serial_pci_class_init,
};

/* virtio-net-pci */

static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
    DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_NIC_PROPERTIES(VirtIONetPCI, vdev.nic_conf),
    DEFINE_VIRTIO_NET_PROPERTIES(VirtIONetPCI, vdev.net_conf),
    DEFINE_PROP_END_OF_LIST(),
};

static int virtio_net_pci_init(VirtIOPCIProxy *vpci_dev)
{
    VirtIONetPCI *dev = VIRTIO_NET_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    virtio_net_set_config_size(&dev->vdev, vpci_dev->host_features);
    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    if (qdev_init(vdev) < 0) {
        return -1;
    }
    return 0;
}

static void virtio_net_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    k->romfile = "efi-virtio.rom";
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->props = virtio_net_properties;
    vpciklass->init = virtio_net_pci_init;
}

static void virtio_net_pci_instance_init(Object *obj)
{
    VirtIONetPCI *dev = VIRTIO_NET_PCI(obj);
    object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_NET);
    object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
}

static const TypeInfo virtio_net_pci_info = {
    .name          = TYPE_VIRTIO_NET_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIONetPCI),
    .instance_init = virtio_net_pci_instance_init,
    .class_init    = virtio_net_pci_class_init,
};

/* virtio-pci-bus */

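/* The virtio-pci-bus glue: each virtio-pci proxy creates one VirtioBusState
 * in place, and the bus class below forwards the transport callbacks (notify,
 * config save/load, notifier setup) to the proxy implementations above.
 */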
void virtio_pci_bus_new(VirtioBusState *bus, VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    BusState *qbus;
    qbus_create_inplace((BusState *)bus, TYPE_VIRTIO_PCI_BUS, qdev, NULL);
    qbus = BUS(bus);
    qbus->allow_hotplug = 1;
}

static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->get_features = virtio_pci_get_features;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_host_notifier = virtio_pci_set_host_notifier;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->device_plugged = virtio_pci_device_plugged;
}

static const TypeInfo virtio_pci_bus_info = {
    .name          = TYPE_VIRTIO_PCI_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_init    = virtio_pci_bus_class_init,
};

static void virtio_pci_register_types(void)
{
    type_register_static(&virtio_rng_info);
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
#ifdef CONFIG_VIRTFS
    type_register_static(&virtio_9p_info);
#endif
    type_register_static(&virtio_blk_pci_info);
    type_register_static(&virtio_scsi_pci_info);
    type_register_static(&virtio_balloon_pci_info);
    type_register_static(&virtio_serial_pci_info);
    type_register_static(&virtio_net_pci_info);
}

type_init(virtio_pci_register_types)