/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <inttypes.h>

#include "virtio.h"
#include "sysemu.h"

//#define VIRTIO_ZERO_COPY

/* from Linux's linux/virtio_pci.h */

/* A 32-bit r/o bitmask of the features supported by the host */
#define VIRTIO_PCI_HOST_FEATURES        0

/* A 32-bit r/w bitmask of features activated by the guest */
#define VIRTIO_PCI_GUEST_FEATURES       4

/* A 32-bit r/w PFN for the currently selected queue */
#define VIRTIO_PCI_QUEUE_PFN            8

/* A 16-bit r/o queue size for the currently selected queue */
#define VIRTIO_PCI_QUEUE_NUM            12

/* A 16-bit r/w queue selector */
#define VIRTIO_PCI_QUEUE_SEL            14

/* A 16-bit r/w queue notifier */
#define VIRTIO_PCI_QUEUE_NOTIFY         16

/* An 8-bit device status register.  */
#define VIRTIO_PCI_STATUS               18

/* An 8-bit r/o interrupt status register.  Reading the value will return the
 * current contents of the ISR and will also clear it.  This is effectively
 * a read-and-acknowledge. */
#define VIRTIO_PCI_ISR                  19

#define VIRTIO_PCI_CONFIG               20

/* Virtio ABI version, if we increment this, we break the guest driver. */
#define VIRTIO_PCI_ABI_VERSION          0

/* How many bits to shift physical queue address written to QUEUE_PFN.
 * 12 is historical, and due to x86 page size. */
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT    12

/* The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN         4096

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 */
#define wmb() do { } while (0)

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    target_phys_addr_t desc;
    target_phys_addr_t avail;
    target_phys_addr_t used;
} VRing;

struct VirtQueue
{
    VRing vring;
    uint32_t pfn;
    uint16_t last_avail_idx;
    int inuse;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
};

#define VIRTIO_PCI_QUEUE_MAX        16

/* virt queue functions */
#ifdef VIRTIO_ZERO_COPY
static void *virtio_map_gpa(target_phys_addr_t addr, size_t size)
{
    ram_addr_t off;
    target_phys_addr_t addr1;

    off = cpu_get_physical_page_desc(addr);
    if ((off & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        fprintf(stderr, "virtio DMA to IO ram\n");
        exit(1);
    }

    off = (off & TARGET_PAGE_MASK) | (addr & ~TARGET_PAGE_MASK);

    for (addr1 = addr + TARGET_PAGE_SIZE;
         addr1 < TARGET_PAGE_ALIGN(addr + size);
         addr1 += TARGET_PAGE_SIZE) {
        ram_addr_t off1;

        off1 = cpu_get_physical_page_desc(addr1);
        if ((off1 & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            fprintf(stderr, "virtio DMA to IO ram\n");
            exit(1);
        }

        off1 = (off1 & TARGET_PAGE_MASK) | (addr1 & ~TARGET_PAGE_MASK);

        if (off1 != (off + (addr1 - addr))) {
            fprintf(stderr, "discontiguous virtio memory\n");
            exit(1);
        }
    }

    return phys_ram_base + off;
}
#endif

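/* Compute the guest-physical layout of a vring from its base address, as
 * this code expects it: the descriptor table at 'pa', the available ring
 * immediately after the descriptors, and the used ring aligned up to
 * VIRTIO_PCI_VRING_ALIGN beyond the available ring. */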
static void virtqueue_init(VirtQueue *vq, target_phys_addr_t pa)
{
    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 VIRTIO_PCI_VRING_ALIGN);
}

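/* Accessors for the vring fields.  The rings live in guest memory, so every
 * load and store goes through the ld*_phys()/st*_phys() helpers rather than
 * through a host pointer. */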
static inline uint64_t vring_desc_addr(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.desc + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(pa);
}

static inline uint32_t vring_desc_len(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.desc + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(pa);
}

static inline uint16_t vring_desc_flags(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.desc + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_desc_next(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.desc + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(pa);
}

static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(pa);
}

static inline void vring_used_idx_increment(VirtQueue *vq, uint16_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(pa, vring_used_idx(vq) + val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) & ~mask);
}

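/* Tell the guest whether it needs to kick us: clearing VRING_USED_F_NO_NOTIFY
 * (enable != 0) requests notifications for new buffers, setting it tells the
 * driver that notifications can be skipped (a hint only). */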
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    if (enable)
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    else
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}

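/* Complete a previously popped element: copy the data the device produced
 * back into guest memory (and free the bounce buffers in the non-zero-copy
 * build), then record the element's head index and byte count in the used
 * ring at 'idx' slots past the current used index.  The new used index is
 * only published later by virtqueue_flush(), so a batch of fills becomes
 * visible to the guest at once. */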
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

#ifndef VIRTIO_ZERO_COPY
    for (i = 0; i < elem->out_num; i++)
        qemu_free(elem->out_sg[i].iov_base);
#endif

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

#ifdef VIRTIO_ZERO_COPY
        if (size) {
            ram_addr_t addr = (uint8_t *)elem->in_sg[i].iov_base - phys_ram_base;
            ram_addr_t off;

            for (off = 0; off < size; off += TARGET_PAGE_SIZE)
                cpu_physical_memory_set_dirty(addr + off);
        }
#else
        if (size)
            cpu_physical_memory_write(elem->in_addr[i],
                                      elem->in_sg[i].iov_base,
                                      size);

        qemu_free(elem->in_sg[i].iov_base);
#endif

        offset += size;
    }

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Get a pointer to the next entry in the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    /* Make sure buffer is written before we update index. */
    wmb();
    vring_used_idx_increment(vq, count);
    vq->inuse -= count;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

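/* The available index is free-running modulo 2^16, so the uint16_t
 * subtraction below yields the number of heads the guest has queued since
 * 'idx' even across wraparound. */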
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        fprintf(stderr, "Guest moved used index from %u to %u",
                idx, vring_avail_idx(vq));
        exit(1);
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        fprintf(stderr, "Guest says index %u is available", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(VirtQueue *vq, unsigned int i)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(vq, i) & VRING_DESC_F_NEXT))
        return vq->vring.num;

    /* Check they're not leading us off the end of the descriptor table. */
    next = vring_desc_next(vq, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    wmb();

    if (next >= vq->vring.num) {
        fprintf(stderr, "Desc next is %u", next);
        exit(1);
    }

    return next;
}

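/* Without consuming anything, report whether the buffers currently queued by
 * the guest could satisfy a request for in_bytes of device-writable space
 * and/or out_bytes of device-readable data; returns 1 as soon as either
 * requested threshold is reached. */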
int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
{
    unsigned int idx;
    int num_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    num_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        int i;

        i = virtqueue_get_head(vq, idx++);
        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > vq->vring.num) {
                fprintf(stderr, "Looped descriptor");
                exit(1);
            }

            if (vring_desc_flags(vq, i) & VRING_DESC_F_WRITE) {
                if (in_bytes > 0 &&
                    (in_total += vring_desc_len(vq, i)) >= in_bytes)
                    return 1;
            } else {
                if (out_bytes > 0 &&
                    (out_total += vring_desc_len(vq, i)) >= out_bytes)
                    return 1;
            }
        } while ((i = virtqueue_next_desc(vq, i)) != vq->vring.num);
    }

    return 0;
}

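/* Take the next available head off the ring and walk its descriptor chain,
 * building up elem: device-writable descriptors land in in_sg/in_addr,
 * device-readable ones in out_sg.  In the default (non-zero-copy) build the
 * guest data is bounce-buffered here with qemu_malloc(), and written back
 * and freed again in virtqueue_fill().  Returns the number of descriptors
 * in the chain, or 0 if no buffer was available. */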
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are neither input nor output buffers. */
    elem->out_num = elem->in_num = 0;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    do {
        struct iovec *sg;

        if (vring_desc_flags(vq, i) & VRING_DESC_F_WRITE) {
            elem->in_addr[elem->in_num] = vring_desc_addr(vq, i);
            sg = &elem->in_sg[elem->in_num++];
        } else
            sg = &elem->out_sg[elem->out_num++];

        /* Grab the first descriptor, and check it's OK. */
        sg->iov_len = vring_desc_len(vq, i);

#ifdef VIRTIO_ZERO_COPY
        sg->iov_base = virtio_map_gpa(vring_desc_addr(vq, i), sg->iov_len);
#else
        /* cap individual scatter element size to prevent unbounded allocations
           of memory from the guest.  Practically speaking, no virtio driver
           will ever pass more than a page in each element.  We set the cap to
           be 2MB in case for some reason a large page makes its way into the
           sg list.  When we implement a zero copy API, this limitation will
           disappear */
        if (sg->iov_len > (2 << 20))
            sg->iov_len = 2 << 20;

        sg->iov_base = qemu_malloc(sg->iov_len);
        if (!(vring_desc_flags(vq, i) & VRING_DESC_F_WRITE)) {
            cpu_physical_memory_read(vring_desc_addr(vq, i),
                                     sg->iov_base,
                                     sg->iov_len);
        }
#endif
        if (sg->iov_base == NULL) {
            fprintf(stderr, "Invalid mapping\n");
            exit(1);
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > vq->vring.num) {
            fprintf(stderr, "Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(vq, i)) != vq->vring.num);

    elem->index = head;

    vq->inuse++;

    return elem->in_num + elem->out_num;
}

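/* Illustrative sketch only (the byte count below is a placeholder, not part
 * of this file): a device's handle_output callback typically drains its
 * queue with the helpers above, roughly as
 *
 *     VirtQueueElement elem;
 *     while (virtqueue_pop(vq, &elem)) {
 *         ... consume elem.out_sg[], produce replies into elem.in_sg[] ...
 *         virtqueue_push(vq, &elem, bytes_written);
 *         virtio_notify(vdev, vq);
 *     }
 *
 * where bytes_written is however much data was produced into in_sg. */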
/* virtio device */

static VirtIODevice *to_virtio_device(PCIDevice *pci_dev)
{
    return (VirtIODevice *)pci_dev;
}

static void virtio_update_irq(VirtIODevice *vdev)
{
    qemu_set_irq(vdev->pci_dev.irq[0], vdev->isr & 1);
}

static void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    if (vdev->reset)
        vdev->reset(vdev);

    vdev->features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    virtio_update_irq(vdev);

    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pfn = 0;
    }
}

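/* Guest accesses to the common virtio-PCI registers at the start of the I/O
 * BAR land here; 'addr' is an absolute port number, so the BAR base
 * (vdev->addr) is subtracted before dispatching on the VIRTIO_PCI_* offsets
 * defined above. */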
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIODevice *vdev = to_virtio_device(opaque);
    ram_addr_t pa;

    addr -= vdev->addr;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        if (vdev->set_features)
            vdev->set_features(vdev, val);
        vdev->features = val;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (ram_addr_t)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        vdev->vq[vdev->queue_sel].pfn = val;
        if (pa == 0) {
            virtio_reset(vdev);
        } else {
            virtqueue_init(&vdev->vq[vdev->queue_sel], pa);
        }
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_PCI_QUEUE_MAX)
            vdev->queue_sel = val;
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_PCI_QUEUE_MAX && vdev->vq[val].vring.desc)
            vdev->vq[val].handle_output(vdev, &vdev->vq[val]);
        break;
    case VIRTIO_PCI_STATUS:
        vdev->status = val & 0xFF;
        if (vdev->status == 0)
            virtio_reset(vdev);
        break;
    }
}

static uint32_t virtio_ioport_read(void *opaque, uint32_t addr)
{
    VirtIODevice *vdev = to_virtio_device(opaque);
    uint32_t ret = 0xFFFFFFFF;

    addr -= vdev->addr;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->get_features(vdev);
        ret |= (1 << VIRTIO_F_NOTIFY_ON_EMPTY);
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = vdev->vq[vdev->queue_sel].pfn;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = vdev->vq[vdev->queue_sel].vring.num;
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = vdev->isr;
        vdev->isr = 0;
        virtio_update_irq(vdev);
        break;
    default:
        break;
    }

    return ret;
}

static uint32_t virtio_config_readb(void *opaque, uint32_t addr)
{
    VirtIODevice *vdev = opaque;
    uint8_t val;

    vdev->get_config(vdev, vdev->config);

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

static uint32_t virtio_config_readw(void *opaque, uint32_t addr)
{
    VirtIODevice *vdev = opaque;
    uint16_t val;

    vdev->get_config(vdev, vdev->config);

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

static uint32_t virtio_config_readl(void *opaque, uint32_t addr)
{
    VirtIODevice *vdev = opaque;
    uint32_t val;

    vdev->get_config(vdev, vdev->config);

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

static void virtio_config_writeb(void *opaque, uint32_t addr, uint32_t data)
{
    VirtIODevice *vdev = opaque;
    uint8_t val = data;

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

static void virtio_config_writew(void *opaque, uint32_t addr, uint32_t data)
{
    VirtIODevice *vdev = opaque;
    uint16_t val = data;

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

static void virtio_config_writel(void *opaque, uint32_t addr, uint32_t data)
{
    VirtIODevice *vdev = opaque;
    uint32_t val = data;

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

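/* PCI BAR mapping callback: the first VIRTIO_PCI_CONFIG (20) bytes of the
 * I/O region are the common registers above, registered for 1-, 2- and
 * 4-byte access; any device-specific configuration space follows at offset
 * 20 and is served from the vdev->config cache. */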
static void virtio_map(PCIDevice *pci_dev, int region_num,
                       uint32_t addr, uint32_t size, int type)
{
    VirtIODevice *vdev = to_virtio_device(pci_dev);
    int i;

    vdev->addr = addr;
    for (i = 0; i < 3; i++) {
        register_ioport_write(addr, 20, 1 << i, virtio_ioport_write, vdev);
        register_ioport_read(addr, 20, 1 << i, virtio_ioport_read, vdev);
    }

    if (vdev->config_len) {
        register_ioport_write(addr + 20, vdev->config_len, 1,
                              virtio_config_writeb, vdev);
        register_ioport_write(addr + 20, vdev->config_len, 2,
                              virtio_config_writew, vdev);
        register_ioport_write(addr + 20, vdev->config_len, 4,
                              virtio_config_writel, vdev);
        register_ioport_read(addr + 20, vdev->config_len, 1,
                             virtio_config_readb, vdev);
        register_ioport_read(addr + 20, vdev->config_len, 2,
                             virtio_config_readw, vdev);
        register_ioport_read(addr + 20, vdev->config_len, 4,
                             virtio_config_readl, vdev);

        vdev->get_config(vdev, vdev->config);
    }
}

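/* Hand out the next unused VirtQueue slot (vring.num == 0 marks a free one)
 * and attach the device's handler; aborts if all VIRTIO_PCI_QUEUE_MAX slots
 * are taken or the requested size exceeds VIRTQUEUE_MAX_SIZE.  The guest
 * supplies the ring's address later via VIRTIO_PCI_QUEUE_PFN. */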
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}

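/* Raise the queue interrupt (ISR bit 0) unless the guest set
 * VRING_AVAIL_F_NO_INTERRUPT; that suppression is ignored when the queue has
 * gone idle, so the guest still learns that everything it queued has been
 * consumed. */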
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    /* Always notify when queue is empty */
    if ((vq->inuse || vring_avail_idx(vq) != vq->last_avail_idx) &&
        (vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT))
        return;

    vdev->isr |= 0x01;
    virtio_update_irq(vdev);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    virtio_update_irq(vdev);
}

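/* Save/restore the common virtio state for migration: PCI config state, the
 * BAR address and common register state, the device config blob, then for
 * each in-use queue its size, PFN and last_avail_idx.  virtio_load() below
 * reads the same layout and recomputes the vring addresses from the saved
 * PFN. */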
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    int i;

    pci_device_save(&vdev->pci_dev, f);

    qemu_put_be32s(f, &vdev->addr);
    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        qemu_put_be32s(f, &vdev->vq[i].pfn);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
    }
}

void virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int num, i;

    pci_device_load(&vdev->pci_dev, f);

    qemu_get_be32s(f, &vdev->addr);
    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    qemu_get_be32s(f, &vdev->features);
    vdev->config_len = qemu_get_be32(f);
    qemu_get_buffer(f, vdev->config, vdev->config_len);

    num = qemu_get_be32(f);

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        qemu_get_be32s(f, &vdev->vq[i].pfn);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);

        if (vdev->vq[i].pfn) {
            target_phys_addr_t pa;

            pa = (ram_addr_t)vdev->vq[i].pfn << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
            virtqueue_init(&vdev->vq[i], pa);
        }
    }

    virtio_update_irq(vdev);
}

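/* Common constructor used by the individual virtio devices: registers a PCI
 * device of struct_size bytes, fills in the IDs, class code, revision (the
 * virtio ABI version) and interrupt pin in PCI config space, allocates the
 * queue array and config buffer, and registers an I/O BAR (rounded up to a
 * power of two covering the 20 common bytes plus config_size) mapped via
 * virtio_map, with virtio_reset hooked as the reset handler. */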
VirtIODevice *virtio_init_pci(PCIBus *bus, const char *name,
                              uint16_t vendor, uint16_t device,
                              uint16_t subvendor, uint16_t subdevice,
                              uint16_t class_code, uint8_t pif,
                              size_t config_size, size_t struct_size)
{
    VirtIODevice *vdev;
    PCIDevice *pci_dev;
    uint8_t *config;
    uint32_t size;

    pci_dev = pci_register_device(bus, name, struct_size,
                                  -1, NULL, NULL);
    if (!pci_dev)
        return NULL;

    vdev = to_virtio_device(pci_dev);

    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->vq = qemu_mallocz(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);

    config = pci_dev->config;
    pci_config_set_vendor_id(config, vendor);
    pci_config_set_device_id(config, device);

    config[0x08] = VIRTIO_PCI_ABI_VERSION;

    config[0x09] = pif;
    pci_config_set_class(config, class_code);
    config[0x0e] = 0x00;

    config[0x2c] = subvendor & 0xFF;
    config[0x2d] = (subvendor >> 8) & 0xFF;
    config[0x2e] = subdevice & 0xFF;
    config[0x2f] = (subdevice >> 8) & 0xFF;

    config[0x3d] = 1;

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len)
        vdev->config = qemu_mallocz(config_size);
    else
        vdev->config = NULL;

    size = 20 + config_size;
    if (size & (size-1))
        size = 1 << qemu_fls(size);

    pci_register_io_region(pci_dev, 0, size, PCI_ADDRESS_SPACE_IO,
                           virtio_map);
    qemu_register_reset(virtio_reset, vdev);

    return vdev;
}