hw/virtio.c @ 10c144e2

/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <inttypes.h>

#include "virtio.h"
#include "sysemu.h"

//#define VIRTIO_ZERO_COPY

/* from Linux's linux/virtio_pci.h */

/* A 32-bit r/o bitmask of the features supported by the host */
#define VIRTIO_PCI_HOST_FEATURES        0

/* A 32-bit r/w bitmask of features activated by the guest */
#define VIRTIO_PCI_GUEST_FEATURES       4

/* A 32-bit r/w PFN for the currently selected queue */
#define VIRTIO_PCI_QUEUE_PFN            8

/* A 16-bit r/o queue size for the currently selected queue */
#define VIRTIO_PCI_QUEUE_NUM            12

/* A 16-bit r/w queue selector */
#define VIRTIO_PCI_QUEUE_SEL            14

/* A 16-bit r/w queue notifier */
#define VIRTIO_PCI_QUEUE_NOTIFY         16

/* An 8-bit device status register.  */
#define VIRTIO_PCI_STATUS               18

/* An 8-bit r/o interrupt status register.  Reading the value will return the
 * current contents of the ISR and will also clear it.  This is effectively
 * a read-and-acknowledge. */
#define VIRTIO_PCI_ISR                  19

#define VIRTIO_PCI_CONFIG               20

/* Virtio ABI version; if we increment this, we break the guest driver. */
#define VIRTIO_PCI_ABI_VERSION          0

/* How many bits to shift the physical queue address written to QUEUE_PFN.
 * 12 is historical, and due to the x86 page size. */
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT    12

/* The alignment to use between consumer and producer parts of the vring.
 * x86 page size again. */
#define VIRTIO_PCI_VRING_ALIGN         4096
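
/* Illustrative sketch (not compiled): taken together, the registers above
 * imply roughly the following guest-side setup sequence for one queue.  This
 * is only meant to clarify the register layout; iobase, queue and
 * alloc_vring_pfn() are hypothetical, and Linux-style port I/O helpers
 * (inl(port), outl(val, port), ...) are assumed. */
#if 0
static void guest_queue_setup_sketch(uint32_t iobase, uint16_t queue)
{
    uint32_t features, pfn;
    uint16_t num;

    features = inl(iobase + VIRTIO_PCI_HOST_FEATURES);    /* what the host offers */
    outl(features, iobase + VIRTIO_PCI_GUEST_FEATURES);   /* ack (a subset, in practice) */

    outw(queue, iobase + VIRTIO_PCI_QUEUE_SEL);           /* select a queue... */
    num = inw(iobase + VIRTIO_PCI_QUEUE_NUM);              /* ...and read its size */

    pfn = alloc_vring_pfn(num);                            /* hypothetical: ring phys addr
                                                              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT */
    outl(pfn, iobase + VIRTIO_PCI_QUEUE_PFN);              /* tell the host where the ring is */

    outw(queue, iobase + VIRTIO_PCI_QUEUE_NOTIFY);         /* later: kick the host */
}
#endif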

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 */
#define wmb() do { } while (0)

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    target_phys_addr_t desc;
    target_phys_addr_t avail;
    target_phys_addr_t used;
} VRing;
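
/* Illustrative sketch (guest's view, not compiled): a request is posted by
 * filling a chain of VRingDesc entries and publishing the head index in the
 * avail ring.  The flag names (VRING_DESC_F_NEXT, VRING_DESC_F_WRITE) come
 * from virtio.h and are what virtqueue_pop() below looks for; desc, avail,
 * num and the buf_* values are hypothetical guest-side variables. */
#if 0
    /* descriptor 0: guest -> host buffer, chained to descriptor 1 */
    desc[0].addr  = buf_out_phys;
    desc[0].len   = buf_out_len;
    desc[0].flags = VRING_DESC_F_NEXT;
    desc[0].next  = 1;

    /* descriptor 1: host -> guest buffer (the device writes into it) */
    desc[1].addr  = buf_in_phys;
    desc[1].len   = buf_in_len;
    desc[1].flags = VRING_DESC_F_WRITE;

    /* publish the head of the chain and bump the avail index */
    avail->ring[avail->idx % num] = 0;
    avail->idx++;
#endif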

struct VirtQueue
{
    VRing vring;
    uint32_t pfn;
    uint16_t last_avail_idx;
    int inuse;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
};

#define VIRTIO_PCI_QUEUE_MAX        16

/* virt queue functions */
#ifdef VIRTIO_ZERO_COPY
static void *virtio_map_gpa(target_phys_addr_t addr, size_t size)
{
    ram_addr_t off;
    target_phys_addr_t addr1;

    off = cpu_get_physical_page_desc(addr);
    if ((off & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        fprintf(stderr, "virtio DMA to IO ram\n");
        exit(1);
    }

    off = (off & TARGET_PAGE_MASK) | (addr & ~TARGET_PAGE_MASK);

    for (addr1 = addr + TARGET_PAGE_SIZE;
         addr1 < TARGET_PAGE_ALIGN(addr + size);
         addr1 += TARGET_PAGE_SIZE) {
        ram_addr_t off1;

        off1 = cpu_get_physical_page_desc(addr1);
        if ((off1 & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            fprintf(stderr, "virtio DMA to IO ram\n");
            exit(1);
        }

        off1 = (off1 & TARGET_PAGE_MASK) | (addr1 & ~TARGET_PAGE_MASK);

        if (off1 != (off + (addr1 - addr))) {
            fprintf(stderr, "discontiguous virtio memory\n");
            exit(1);
        }
    }

    return phys_ram_base + off;
}
#endif

static void virtqueue_init(VirtQueue *vq, target_phys_addr_t pa)
{
    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 VIRTIO_PCI_VRING_ALIGN);
}
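
/* Worked example (illustrative): for a queue of num = 256 entries starting at
 * a page-aligned guest-physical address pa, the layout computed above is
 *
 *   desc  = pa                                  (256 * sizeof(VRingDesc) = 4096 bytes)
 *   avail = pa + 4096                           (offsetof(VRingAvail, ring[256]) = 516 bytes)
 *   used  = vring_align(pa + 4096 + 516, 4096)  = pa + 8192
 */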

static inline uint64_t vring_desc_addr(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.desc + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(pa);
}

static inline uint32_t vring_desc_len(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.desc + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(pa);
}

static inline uint16_t vring_desc_flags(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.desc + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_desc_next(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.desc + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(pa);
}

static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(pa);
}

static inline void vring_used_idx_increment(VirtQueue *vq, uint16_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(pa, vring_used_idx(vq) + val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) & ~mask);
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    if (enable)
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    else
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
}
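
/* Illustrative sketch (not compiled): a device's handle_output callback can
 * turn guest->host notifications off while it drains the ring and back on
 * when it is done; the guest checks VRING_USED_F_NO_NOTIFY before kicking.
 * drain_queue() is a hypothetical per-device routine. */
#if 0
    virtio_queue_set_notification(vq, 0);   /* sets VRING_USED_F_NO_NOTIFY */
    drain_queue(vdev, vq);                  /* hypothetical: pop and process everything */
    virtio_queue_set_notification(vq, 1);   /* clears it again */
#endif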

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}

void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

#ifndef VIRTIO_ZERO_COPY
    for (i = 0; i < elem->out_num; i++)
        qemu_free(elem->out_sg[i].iov_base);
#endif

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

#ifdef VIRTIO_ZERO_COPY
        if (size) {
            ram_addr_t addr = (uint8_t *)elem->in_sg[i].iov_base - phys_ram_base;
            ram_addr_t off;

            for (off = 0; off < size; off += TARGET_PAGE_SIZE)
                cpu_physical_memory_set_dirty(addr + off);
        }
#else
        if (size)
            cpu_physical_memory_write(elem->in_addr[i],
                                      elem->in_sg[i].iov_base,
                                      size);

        qemu_free(elem->in_sg[i].iov_base);
#endif

        offset += size;
    }

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Get a pointer to the next entry in the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    /* Make sure buffer is written before we update index. */
    wmb();
    vring_used_idx_increment(vq, count);
    vq->inuse -= count;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        fprintf(stderr, "Guest moved used index from %u to %u\n",
                idx, vring_avail_idx(vq));
        exit(1);
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        fprintf(stderr, "Guest says index %u is available\n", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(VirtQueue *vq, unsigned int i)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(vq, i) & VRING_DESC_F_NEXT))
        return vq->vring.num;

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(vq, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    wmb();

    if (next >= vq->vring.num) {
        fprintf(stderr, "Desc next is %u\n", next);
        exit(1);
    }

    return next;
}

int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
{
    unsigned int idx;
    int num_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    num_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        int i;

        i = virtqueue_get_head(vq, idx++);
        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > vq->vring.num) {
                fprintf(stderr, "Looped descriptor\n");
                exit(1);
            }

            if (vring_desc_flags(vq, i) & VRING_DESC_F_WRITE) {
                if (in_bytes > 0 &&
                    (in_total += vring_desc_len(vq, i)) >= in_bytes)
                    return 1;
            } else {
                if (out_bytes > 0 &&
                    (out_total += vring_desc_len(vq, i)) >= out_bytes)
                    return 1;
            }
        } while ((i = virtqueue_next_desc(vq, i)) != vq->vring.num);
    }

    return 0;
}

int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are neither input nor output buffers. */
    elem->out_num = elem->in_num = 0;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    do {
        struct iovec *sg;

        if (vring_desc_flags(vq, i) & VRING_DESC_F_WRITE) {
            elem->in_addr[elem->in_num] = vring_desc_addr(vq, i);
            sg = &elem->in_sg[elem->in_num++];
        } else
            sg = &elem->out_sg[elem->out_num++];

        /* Grab the first descriptor, and check it's OK. */
        sg->iov_len = vring_desc_len(vq, i);

#ifdef VIRTIO_ZERO_COPY
        sg->iov_base = virtio_map_gpa(vring_desc_addr(vq, i), sg->iov_len);
#else
        /* cap individual scatter element size to prevent unbounded allocations
           of memory from the guest.  Practically speaking, no virtio driver
           will ever pass more than a page in each element.  We set the cap to
           be 2MB in case for some reason a large page makes its way into the
           sg list.  When we implement a zero copy API, this limitation will
           disappear */
        if (sg->iov_len > (2 << 20))
            sg->iov_len = 2 << 20;

        sg->iov_base = qemu_malloc(sg->iov_len);
        if (sg->iov_base &&
            !(vring_desc_flags(vq, i) & VRING_DESC_F_WRITE)) {
            cpu_physical_memory_read(vring_desc_addr(vq, i),
                                     sg->iov_base,
                                     sg->iov_len);
        }
#endif
        if (sg->iov_base == NULL) {
            fprintf(stderr, "Invalid mapping\n");
            exit(1);
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > vq->vring.num) {
            fprintf(stderr, "Looped descriptor\n");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(vq, i)) != vq->vring.num);

    elem->index = head;

    vq->inuse++;

    return elem->in_num + elem->out_num;
}
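
/* Illustrative sketch (not compiled): how a device model consumes a queue
 * with the functions above.  Completions can be batched: virtqueue_fill()
 * records each element in the used ring and a single virtqueue_flush()
 * publishes them all (virtqueue_push() is the one-element shortcut).
 * handle_one() and written are hypothetical. */
#if 0
static void my_handle_output_sketch(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtQueueElement elem;
    unsigned int count = 0;

    while (virtqueue_pop(vq, &elem)) {
        unsigned int written = handle_one(vdev, &elem);   /* hypothetical device work */
        virtqueue_fill(vq, &elem, written, count++);      /* stage the completion */
    }

    virtqueue_flush(vq, count);    /* publish all staged completions at once */
    virtio_notify(vdev, vq);       /* raise the interrupt if the guest wants one */
}
#endif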

/* virtio device */

static VirtIODevice *to_virtio_device(PCIDevice *pci_dev)
{
    return (VirtIODevice *)pci_dev;
}

static void virtio_update_irq(VirtIODevice *vdev)
{
    qemu_set_irq(vdev->pci_dev.irq[0], vdev->isr & 1);
}

static void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    if (vdev->reset)
        vdev->reset(vdev);

    vdev->features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    virtio_update_irq(vdev);

    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pfn = 0;
    }
}

static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIODevice *vdev = to_virtio_device(opaque);
    ram_addr_t pa;

    addr -= vdev->addr;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        if (vdev->set_features)
            vdev->set_features(vdev, val);
        vdev->features = val;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (ram_addr_t)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        vdev->vq[vdev->queue_sel].pfn = val;
        if (pa == 0) {
            virtio_reset(vdev);
        } else {
            virtqueue_init(&vdev->vq[vdev->queue_sel], pa);
        }
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_PCI_QUEUE_MAX)
            vdev->queue_sel = val;
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_PCI_QUEUE_MAX && vdev->vq[val].vring.desc)
            vdev->vq[val].handle_output(vdev, &vdev->vq[val]);
        break;
    case VIRTIO_PCI_STATUS:
        vdev->status = val & 0xFF;
        if (vdev->status == 0)
            virtio_reset(vdev);
        break;
    }
}

static uint32_t virtio_ioport_read(void *opaque, uint32_t addr)
{
    VirtIODevice *vdev = to_virtio_device(opaque);
    uint32_t ret = 0xFFFFFFFF;

    addr -= vdev->addr;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->get_features(vdev);
        ret |= (1 << VIRTIO_F_NOTIFY_ON_EMPTY);
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = vdev->vq[vdev->queue_sel].pfn;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = vdev->vq[vdev->queue_sel].vring.num;
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = vdev->isr;
        vdev->isr = 0;
        virtio_update_irq(vdev);
        break;
    default:
        break;
    }

    return ret;
}

static uint32_t virtio_config_readb(void *opaque, uint32_t addr)
{
    VirtIODevice *vdev = opaque;
    uint8_t val;

    vdev->get_config(vdev, vdev->config);

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

static uint32_t virtio_config_readw(void *opaque, uint32_t addr)
{
    VirtIODevice *vdev = opaque;
    uint16_t val;

    vdev->get_config(vdev, vdev->config);

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

static uint32_t virtio_config_readl(void *opaque, uint32_t addr)
{
    VirtIODevice *vdev = opaque;
    uint32_t val;

    vdev->get_config(vdev, vdev->config);

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

static void virtio_config_writeb(void *opaque, uint32_t addr, uint32_t data)
{
    VirtIODevice *vdev = opaque;
    uint8_t val = data;

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

static void virtio_config_writew(void *opaque, uint32_t addr, uint32_t data)
{
    VirtIODevice *vdev = opaque;
    uint16_t val = data;

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

static void virtio_config_writel(void *opaque, uint32_t addr, uint32_t data)
{
    VirtIODevice *vdev = opaque;
    uint32_t val = data;

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

static void virtio_map(PCIDevice *pci_dev, int region_num,
                       uint32_t addr, uint32_t size, int type)
{
    VirtIODevice *vdev = to_virtio_device(pci_dev);
    int i;

    vdev->addr = addr;
    for (i = 0; i < 3; i++) {
        register_ioport_write(addr, 20, 1 << i, virtio_ioport_write, vdev);
        register_ioport_read(addr, 20, 1 << i, virtio_ioport_read, vdev);
    }

    if (vdev->config_len) {
        register_ioport_write(addr + 20, vdev->config_len, 1,
                              virtio_config_writeb, vdev);
        register_ioport_write(addr + 20, vdev->config_len, 2,
                              virtio_config_writew, vdev);
        register_ioport_write(addr + 20, vdev->config_len, 4,
                              virtio_config_writel, vdev);
        register_ioport_read(addr + 20, vdev->config_len, 1,
                             virtio_config_readb, vdev);
        register_ioport_read(addr + 20, vdev->config_len, 2,
                             virtio_config_readw, vdev);
        register_ioport_read(addr + 20, vdev->config_len, 4,
                             virtio_config_readl, vdev);

        vdev->get_config(vdev, vdev->config);
    }
}

VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    /* Always notify when queue is empty */
    if ((vq->inuse || vring_avail_idx(vq) != vq->last_avail_idx) &&
        (vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT))
        return;

    vdev->isr |= 0x01;
    virtio_update_irq(vdev);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    vdev->isr |= 0x03;
    virtio_update_irq(vdev);
}

void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    int i;

    pci_device_save(&vdev->pci_dev, f);

    qemu_put_be32s(f, &vdev->addr);
    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        qemu_put_be32s(f, &vdev->vq[i].pfn);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
    }
}

void virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int num, i;

    pci_device_load(&vdev->pci_dev, f);

    qemu_get_be32s(f, &vdev->addr);
    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    qemu_get_be32s(f, &vdev->features);
    vdev->config_len = qemu_get_be32(f);
    qemu_get_buffer(f, vdev->config, vdev->config_len);

    num = qemu_get_be32(f);

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        qemu_get_be32s(f, &vdev->vq[i].pfn);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);

        if (vdev->vq[i].pfn) {
            target_phys_addr_t pa;

            pa = (ram_addr_t)vdev->vq[i].pfn << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
            virtqueue_init(&vdev->vq[i], pa);
        }
    }

    virtio_update_irq(vdev);
}
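
/* Illustrative sketch (not compiled): a device built on this layer would
 * typically call virtio_save()/virtio_load() from its own savevm handlers
 * before handling any device-specific state.  MyDevice and the handler names
 * are hypothetical; the sketch assumes the VirtIODevice is embedded in the
 * device state as vdev. */
#if 0
static void my_device_save_sketch(QEMUFile *f, void *opaque)
{
    MyDevice *s = opaque;                 /* hypothetical device state */

    virtio_save(&s->vdev, f);             /* common virtio/PCI state first */
    /* ... then device-specific fields ... */
}

static int my_device_load_sketch(QEMUFile *f, void *opaque, int version_id)
{
    MyDevice *s = opaque;

    virtio_load(&s->vdev, f);             /* restores queues via virtqueue_init() */
    /* ... then device-specific fields ... */
    return 0;
}
#endif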

VirtIODevice *virtio_init_pci(PCIBus *bus, const char *name,
                              uint16_t vendor, uint16_t device,
                              uint16_t subvendor, uint16_t subdevice,
                              uint8_t class_code, uint8_t subclass_code,
                              uint8_t pif, size_t config_size,
                              size_t struct_size)
{
    VirtIODevice *vdev;
    PCIDevice *pci_dev;
    uint8_t *config;
    uint32_t size;

    pci_dev = pci_register_device(bus, name, struct_size,
                                  -1, NULL, NULL);
    if (!pci_dev)
        return NULL;

    vdev = to_virtio_device(pci_dev);

    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->vq = qemu_mallocz(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);

    config = pci_dev->config;
    config[0x00] = vendor & 0xFF;
    config[0x01] = (vendor >> 8) & 0xFF;
    config[0x02] = device & 0xFF;
    config[0x03] = (device >> 8) & 0xFF;

    config[0x08] = VIRTIO_PCI_ABI_VERSION;

    config[0x09] = pif;
    config[0x0a] = subclass_code;
    config[0x0b] = class_code;
    config[0x0e] = 0x00;

    config[0x2c] = subvendor & 0xFF;
    config[0x2d] = (subvendor >> 8) & 0xFF;
    config[0x2e] = subdevice & 0xFF;
    config[0x2f] = (subdevice >> 8) & 0xFF;

    config[0x3d] = 1;

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len)
        vdev->config = qemu_mallocz(config_size);
    else
        vdev->config = NULL;

    size = 20 + config_size;
    if (size & (size-1))
        size = 1 << qemu_fls(size);

    pci_register_io_region(pci_dev, 0, size, PCI_ADDRESS_SPACE_IO,
                           virtio_map);
    qemu_register_reset(virtio_reset, vdev);

    return vdev;
}
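
/* Illustrative sketch (not compiled): putting the pieces together, a device
 * model registers itself with virtio_init_pci(), fills in its callbacks, and
 * adds its queues with virtio_add_queue().  MyDevice, the my_* callbacks,
 * my_config and the PCI IDs below are hypothetical; the struct layout assumes
 * the VirtIODevice is the first member, as to_virtio_device() expects. */
#if 0
typedef struct MyDevice
{
    VirtIODevice vdev;
    VirtQueue *vq;
    /* ... device-specific state ... */
} MyDevice;

static MyDevice *my_device_init_sketch(PCIBus *bus)
{
    MyDevice *s;

    s = (MyDevice *)virtio_init_pci(bus, "my-device",
                                    0x1AF4, 0x1000,       /* hypothetical vendor/device IDs */
                                    0x1AF4, 0x0001,       /* hypothetical subsystem IDs */
                                    0x00, 0x80, 0x00,     /* class, subclass, prog-if */
                                    sizeof(struct my_config), sizeof(MyDevice));
    if (!s)
        return NULL;

    s->vdev.get_features = my_get_features;   /* hypothetical callbacks */
    s->vdev.get_config   = my_get_config;
    s->vq = virtio_add_queue(&s->vdev, 128, my_handle_output);

    return s;
}
#endif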