hw/virtio.c @ b8193adb

/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <inttypes.h>

#include "virtio.h"
#include "sysemu.h"

/* The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN         4096

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 */
#define wmb() do { } while (0)

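/* The structs below mirror the guest-visible vring layout: a descriptor
 * table, an "avail" ring written by the guest, and a "used" ring written by
 * the device.  virtqueue_init() lays the three out contiguously in guest
 * memory; with num = 256, for example, the descriptor table occupies
 * 256 * sizeof(VRingDesc) = 4096 bytes, the avail ring follows it, and the
 * used ring starts at the next VIRTIO_PCI_VRING_ALIGN boundary. */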
typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    target_phys_addr_t desc;
    target_phys_addr_t avail;
    target_phys_addr_t used;
} VRing;

struct VirtQueue
{
    VRing vring;
    target_phys_addr_t pa;
    uint16_t last_avail_idx;
    int inuse;
    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
};

#define VIRTIO_PCI_QUEUE_MAX 16

/* virt queue functions */
static void virtqueue_init(VirtQueue *vq)
{
    target_phys_addr_t pa = vq->pa;

    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 VIRTIO_PCI_VRING_ALIGN);
}

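/* Field accessors for the rings in guest physical memory.  Every access goes
 * through the ld*_phys()/st*_phys() helpers rather than a persistent
 * mapping, so each read observes the guest's current state. */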
static inline uint64_t vring_desc_addr(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(pa);
}

static inline uint32_t vring_desc_len(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(pa);
}

static inline uint16_t vring_desc_flags(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_desc_next(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(pa);
}

static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(pa);
}

static inline void vring_used_idx_increment(VirtQueue *vq, uint16_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(pa, vring_used_idx(vq) + val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) & ~mask);
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    if (enable)
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    else
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}

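/* Completion is a two-step protocol so devices can batch: virtqueue_fill()
 * unmaps an element's buffers and records it in the used ring at offset
 * 'idx' past the current used index without publishing it, and
 * virtqueue_flush() then bumps the used index by 'count' to make the whole
 * batch visible to the guest.  virtqueue_push() is the common
 * fill-one-then-flush case. */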
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Get a pointer to the next entry in the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    /* Make sure buffer is written before we update index. */
    wmb();
    vring_used_idx_increment(vq, count);
    vq->inuse -= count;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

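/* The helpers below walk indices and descriptor chains that the guest
 * controls.  Out-of-range values are treated as fatal guest bugs: they are
 * reported on stderr and terminate QEMU. */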
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        fprintf(stderr, "Guest moved used index from %u to %u",
                idx, vring_avail_idx(vq));
        exit(1);
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        fprintf(stderr, "Guest says index %u is available", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(target_phys_addr_t desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT))
        return max;

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    wmb();

    if (next >= max) {
        fprintf(stderr, "Desc next is %u", next);
        exit(1);
    }

    return next;
}

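/* Returns non-zero as soon as the buffers queued by the guest provide at
 * least 'in_bytes' of device-writable space or 'out_bytes' of
 * device-readable data; passing 0 for either argument disables that check.
 * Indirect descriptors (VRING_DESC_F_INDIRECT) are followed into their
 * separate table. */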
int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
{
    unsigned int idx;
    int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        unsigned int max, num_bufs, indirect = 0;
        target_phys_addr_t desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
                fprintf(stderr, "Invalid size for indirect buffer table\n");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                fprintf(stderr, "Looped descriptor");
                exit(1);
            }

            /* loop over the indirect descriptor table; read the table
             * address before resetting i (as virtqueue_pop() does) */
            indirect = 1;
            max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
            desc_pa = vring_desc_addr(desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                fprintf(stderr, "Looped descriptor");
                exit(1);
            }

            if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
                if (in_bytes > 0 &&
                    (in_total += vring_desc_len(desc_pa, i)) >= in_bytes)
                    return 1;
            } else {
                if (out_bytes > 0 &&
                    (out_total += vring_desc_len(desc_pa, i)) >= out_bytes)
                    return 1;
            }
        } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }

    return 0;
}

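/* virtqueue_pop() translates the next available descriptor chain into a
 * VirtQueueElement, mapping device-writable buffers into in_sg[] and
 * device-readable buffers into out_sg[] with cpu_physical_memory_map().
 * Typical device-side usage (a sketch, not code from this file):
 *
 *     VirtQueueElement elem;
 *     while (virtqueue_pop(vq, &elem)) {
 *         ... read the request from elem.out_sg,
 *             write the response into elem.in_sg ...
 *         virtqueue_push(vq, &elem, response_len);
 *         virtio_notify(vdev, vq);
 *     }
 */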
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    target_phys_addr_t desc_pa = vq->vring.desc;
    target_phys_addr_t len;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are none of either input nor output. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);

    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
            fprintf(stderr, "Invalid size for indirect buffer table\n");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(desc_pa, i);
        i = 0;
    }

    do {
        struct iovec *sg;
        int is_write = 0;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
            is_write = 1;
        } else
            sg = &elem->out_sg[elem->out_num++];

        /* Grab the first descriptor, and check it's OK. */
        sg->iov_len = vring_desc_len(desc_pa, i);
        len = sg->iov_len;

        sg->iov_base = cpu_physical_memory_map(vring_desc_addr(desc_pa, i),
                                               &len, is_write);

        if (sg->iov_base == NULL || len != sg->iov_len) {
            fprintf(stderr, "virtio: trying to map MMIO memory\n");
            exit(1);
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            fprintf(stderr, "Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

    elem->index = head;

    vq->inuse++;

    return elem->in_num + elem->out_num;
}

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    if (vdev->binding->notify) {
        vdev->binding->notify(vdev->binding_opaque, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    if (vdev->reset)
        vdev->reset(vdev);

    vdev->features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pa = 0;
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
    }
}

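/* Config space accessors.  Reads first ask the device to refresh its config
 * snapshot via get_config(); reads beyond config_len return all ones and
 * writes beyond it are silently dropped. */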
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    uint8_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    uint16_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    uint32_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint8_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint16_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint32_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_queue_set_addr(VirtIODevice *vdev, int n, target_phys_addr_t addr)
{
    vdev->vq[n].pa = addr;
    virtqueue_init(&vdev->vq[n]);
}

target_phys_addr_t virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].pa;
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    if (n < VIRTIO_PCI_QUEUE_MAX && vdev->vq[n].vring.desc) {
        vdev->vq[n].handle_output(vdev, &vdev->vq[n]);
    }
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    if (n < VIRTIO_PCI_QUEUE_MAX)
        vdev->vq[n].vector = vector;
}

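/* Queues come from a fixed pool of VIRTIO_PCI_QUEUE_MAX slots; a slot with
 * vring.num == 0 is free.  Exhausting the pool, or requesting more than
 * VIRTQUEUE_MAX_SIZE entries, is a programming error and aborts. */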
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}

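/* Interrupt suppression: the guest sets VRING_AVAIL_F_NO_INTERRUPT to ask
 * that completions not be signalled.  If the guest has acknowledged
 * VIRTIO_F_NOTIFY_ON_EMPTY, the device notifies despite that flag whenever
 * the queue has just drained (nothing in flight and no new buffers
 * available). */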
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    /* Always notify when queue is empty (when feature acknowledge) */
    if ((vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT) &&
        (!(vdev->features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) ||
         (vq->inuse || vring_avail_idx(vq) != vq->last_avail_idx)))
        return;

    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    virtio_notify_vector(vdev, vdev->config_vector);
}

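/* Migration support: virtio_save()/virtio_load() serialize the common device
 * state (status, isr, queue_sel, features, config space) and then each
 * active queue's geometry and position, with binding-specific hooks saved
 * before the common state and after each queue. */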
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    int i;

    if (vdev->binding->save_config)
        vdev->binding->save_config(vdev->binding_opaque, f);

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        qemu_put_be64(f, vdev->vq[i].pa);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (vdev->binding->save_queue)
            vdev->binding->save_queue(vdev->binding_opaque, i, f);
    }
}

int virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int num, i, ret;

    if (vdev->binding->load_config) {
        ret = vdev->binding->load_config(vdev->binding_opaque, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    qemu_get_be32s(f, &vdev->features);
    vdev->config_len = qemu_get_be32(f);
    qemu_get_buffer(f, vdev->config, vdev->config_len);

    num = qemu_get_be32(f);

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);

        if (vdev->vq[i].pa) {
            virtqueue_init(&vdev->vq[i]);
        }
        if (vdev->binding->load_queue) {
            ret = vdev->binding->load_queue(vdev->binding_opaque, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
    return 0;
}

void virtio_cleanup(VirtIODevice *vdev)
{
    if (vdev->config)
        qemu_free(vdev->config);
    qemu_free(vdev->vq);
}

VirtIODevice *virtio_common_init(const char *name, uint16_t device_id,
                                 size_t config_size, size_t struct_size)
{
    VirtIODevice *vdev;
    int i;

    vdev = qemu_mallocz(struct_size);

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = qemu_mallocz(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++)
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len)
        vdev->config = qemu_mallocz(config_size);
    else
        vdev->config = NULL;

    return vdev;
}

void virtio_bind_device(VirtIODevice *vdev, const VirtIOBindings *binding,
                        void *opaque)
{
    vdev->binding = binding;
    vdev->binding_opaque = opaque;
}