hw/virtio.c @ 5c3234c6
/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <inttypes.h>

#include "trace.h"
#include "qemu-error.h"
#include "virtio.h"

/* The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN         4096

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 * In any case, we must prevent the compiler from reordering the code.
 * TODO: we likely need some rmb()/mb() as well.
 */

#define wmb() __asm__ __volatile__("": : :"memory")
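
/* Guest ring layout (legacy virtio): each virtqueue is one contiguous
 * guest-physical region holding, in order, the descriptor table
 * (VRingDesc[num]), the "avail" ring the guest fills to post buffers, and
 * the "used" ring the host fills to return them.  The used ring starts on
 * a VIRTIO_PCI_VRING_ALIGN boundary; see virtqueue_init() below. */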
typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    target_phys_addr_t desc;
    target_phys_addr_t avail;
    target_phys_addr_t used;
} VRing;

struct VirtQueue
{
    VRing vring;
    target_phys_addr_t pa;
    uint16_t last_avail_idx;
    int inuse;
    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
};

/* virt queue functions */
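
/* Derive the guest-physical addresses of the three vring parts from the
 * base address programmed by the guest.  For example, with num = 256 the
 * descriptor table occupies 256 * 16 = 4096 bytes, the avail ring ends at
 * offset 4096 + 4 + 2 * 256 = 4612, and the used ring is rounded up to the
 * next 4096-byte boundary, offset 8192. */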
static void virtqueue_init(VirtQueue *vq)
{
    target_phys_addr_t pa = vq->pa;

    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 VIRTIO_PCI_VRING_ALIGN);
}
static inline uint64_t vring_desc_addr(target_phys_addr_t desc_pa, int i) |
95 |
{ |
96 |
target_phys_addr_t pa; |
97 |
pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
|
98 |
return ldq_phys(pa);
|
99 |
} |
100 |
|
101 |
static inline uint32_t vring_desc_len(target_phys_addr_t desc_pa, int i) |
102 |
{ |
103 |
target_phys_addr_t pa; |
104 |
pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
|
105 |
return ldl_phys(pa);
|
106 |
} |
107 |
|
108 |
static inline uint16_t vring_desc_flags(target_phys_addr_t desc_pa, int i) |
109 |
{ |
110 |
target_phys_addr_t pa; |
111 |
pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
|
112 |
return lduw_phys(pa);
|
113 |
} |
114 |
|
115 |
static inline uint16_t vring_desc_next(target_phys_addr_t desc_pa, int i) |
116 |
{ |
117 |
target_phys_addr_t pa; |
118 |
pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
|
119 |
return lduw_phys(pa);
|
120 |
} |
121 |
|
122 |
static inline uint16_t vring_avail_flags(VirtQueue *vq) |
123 |
{ |
124 |
target_phys_addr_t pa; |
125 |
pa = vq->vring.avail + offsetof(VRingAvail, flags); |
126 |
return lduw_phys(pa);
|
127 |
} |
128 |
|
129 |
static inline uint16_t vring_avail_idx(VirtQueue *vq) |
130 |
{ |
131 |
target_phys_addr_t pa; |
132 |
pa = vq->vring.avail + offsetof(VRingAvail, idx); |
133 |
return lduw_phys(pa);
|
134 |
} |
135 |
|
136 |
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i) |
137 |
{ |
138 |
target_phys_addr_t pa; |
139 |
pa = vq->vring.avail + offsetof(VRingAvail, ring[i]); |
140 |
return lduw_phys(pa);
|
141 |
} |
142 |
|
143 |
static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val) |
144 |
{ |
145 |
target_phys_addr_t pa; |
146 |
pa = vq->vring.used + offsetof(VRingUsed, ring[i].id); |
147 |
stl_phys(pa, val); |
148 |
} |
149 |
|
150 |
static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val) |
151 |
{ |
152 |
target_phys_addr_t pa; |
153 |
pa = vq->vring.used + offsetof(VRingUsed, ring[i].len); |
154 |
stl_phys(pa, val); |
155 |
} |
156 |
|
157 |
static uint16_t vring_used_idx(VirtQueue *vq)
|
158 |
{ |
159 |
target_phys_addr_t pa; |
160 |
pa = vq->vring.used + offsetof(VRingUsed, idx); |
161 |
return lduw_phys(pa);
|
162 |
} |
163 |
|
164 |
static inline void vring_used_idx_increment(VirtQueue *vq, uint16_t val) |
165 |
{ |
166 |
target_phys_addr_t pa; |
167 |
pa = vq->vring.used + offsetof(VRingUsed, idx); |
168 |
stw_phys(pa, vring_used_idx(vq) + val); |
169 |
} |
170 |
|
171 |
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask) |
172 |
{ |
173 |
target_phys_addr_t pa; |
174 |
pa = vq->vring.used + offsetof(VRingUsed, flags); |
175 |
stw_phys(pa, lduw_phys(pa) | mask); |
176 |
} |
177 |
|
178 |
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask) |
179 |
{ |
180 |
target_phys_addr_t pa; |
181 |
pa = vq->vring.used + offsetof(VRingUsed, flags); |
182 |
stw_phys(pa, lduw_phys(pa) & ~mask); |
183 |
} |
184 |
|
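
/* VRING_USED_F_NO_NOTIFY is advisory: it tells the guest driver it may skip
 * notifying ("kicking") the host after adding buffers, which saves an exit
 * per buffer while the device is polling the ring anyway. */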
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    if (enable)
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    else
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}
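
/* Completion is split in two steps so devices can batch: virtqueue_fill()
 * writes a used-ring entry idx slots past the current used index without
 * publishing it, and virtqueue_flush() then publishes count filled entries
 * by bumping the used index (after a write barrier).  virtqueue_push() is
 * the common fill-one/flush-one shortcut. */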
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    trace_virtqueue_fill(vq, elem, len, idx);

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Get a pointer to the next entry in the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    /* Make sure buffer is written before we update index. */
    wmb();
    trace_virtqueue_flush(vq, count);
    vring_used_idx_increment(vq, count);
    vq->inuse -= count;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        error_report("Guest moved used index from %u to %u",
                     idx, vring_avail_idx(vq));
        exit(1);
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        error_report("Guest says index %u is available", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(target_phys_addr_t desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT))
        return max;

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    wmb();

    if (next >= max) {
        error_report("Desc next is %u", next);
        exit(1);
    }

    return next;
}
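
/* Walk the pending chains (following indirect tables where present) and
 * report whether the queue holds at least in_bytes of guest-writable and/or
 * out_bytes of guest-readable buffer space, without consuming anything.
 * Returns 1 as soon as either requested budget is reached. */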
int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
{
    unsigned int idx;
    int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        unsigned int max, num_bufs, indirect = 0;
        target_phys_addr_t desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
                error_report("Invalid size for indirect buffer table");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                error_report("Looped descriptor");
                exit(1);
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
            num_bufs = i = 0;
            desc_pa = vring_desc_addr(desc_pa, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                error_report("Looped descriptor");
                exit(1);
            }

            if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
                if (in_bytes > 0 &&
                    (in_total += vring_desc_len(desc_pa, i)) >= in_bytes)
                    return 1;
            } else {
                if (out_bytes > 0 &&
                    (out_total += vring_desc_len(desc_pa, i)) >= out_bytes)
                    return 1;
            }
        } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }

    return 0;
}
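
/* Translate a chain's guest-physical buffers into host iovecs.  A NULL or
 * shortened mapping means a descriptor points at something other than plain
 * RAM (e.g. MMIO), which virtio does not allow, so treat it as fatal. */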
void virtqueue_map_sg(struct iovec *sg, target_phys_addr_t *addr,
                      size_t num_sg, int is_write)
{
    unsigned int i;
    target_phys_addr_t len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
            error_report("virtio: trying to map MMIO memory");
            exit(1);
        }
    }
}
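
/* Consume the next available chain: fetch the head the guest advertised,
 * walk the descriptor chain (switching to the indirect table when
 * VRING_DESC_F_INDIRECT is set), gather device-writable descriptors into
 * in_sg and device-readable ones into out_sg, and map them into host
 * memory.  Returns the number of iovecs gathered, or 0 if the queue is
 * empty. */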
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    target_phys_addr_t desc_pa = vq->vring.desc;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are none of either input nor output. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);

    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
            error_report("Invalid size for indirect buffer table");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(desc_pa, i);
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        struct iovec *sg;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
        } else {
            elem->out_addr[elem->out_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->out_sg[elem->out_num++];
        }

        sg->iov_len = vring_desc_len(desc_pa, i);

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            error_report("Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

    /* Now map what we have collected */
    virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);

    elem->index = head;

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem->in_num + elem->out_num;
}

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    if (vdev->binding->notify) {
        vdev->binding->notify(vdev->binding_opaque, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    virtio_set_status(vdev, 0);

    if (vdev->reset)
        vdev->reset(vdev);

    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pa = 0;
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
    }
}
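
/* Config space accessors.  Reads refresh the shadow copy through the
 * device's get_config callback first; writes update the shadow and then
 * hand it to set_config.  Out-of-range reads return all-ones and
 * out-of-range writes are silently ignored. */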
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    uint8_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    uint16_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    uint32_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint8_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint16_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint32_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_queue_set_addr(VirtIODevice *vdev, int n, target_phys_addr_t addr)
{
    vdev->vq[n].pa = addr;
    virtqueue_init(&vdev->vq[n]);
}

target_phys_addr_t virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].pa;
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc) {
        VirtIODevice *vdev = vq->vdev;
        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    if (n < VIRTIO_PCI_QUEUE_MAX) {
        virtio_queue_notify_vq(&vdev->vq[n]);
    }
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    if (n < VIRTIO_PCI_QUEUE_MAX)
        vdev->vq[n].vector = vector;
}
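
/* Devices register their queues once at init time.  As an illustrative
 * sketch (not part of this file; my_handle_output is a hypothetical
 * handler), a device typically drains its queue like this:
 *
 *     static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 *     {
 *         VirtQueueElement elem;
 *
 *         while (virtqueue_pop(vq, &elem)) {
 *             ... read request from elem.out_sg, put results in elem.in_sg ...
 *             virtqueue_push(vq, &elem, bytes_written);
 *         }
 *         virtio_notify(vdev, vq);
 *     }
 *
 *     vq = virtio_add_queue(vdev, 128, my_handle_output);
 */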
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}
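
/* ISR bit 0x01 signals a queue interrupt and bit 0x02 a configuration
 * change (virtio_notify_config() raises both, hence 0x03).  virtio_notify()
 * also honours the guest's suppression hint: the interrupt is skipped while
 * the avail ring carries VRING_AVAIL_F_NO_INTERRUPT, unless
 * VIRTIO_F_NOTIFY_ON_EMPTY was negotiated and the queue has just been
 * drained completely. */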
void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    /* Always notify when queue is empty (when feature acknowledge) */
    if ((vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT) &&
        (!(vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) ||
         (vq->inuse || vring_avail_idx(vq) != vq->last_avail_idx)))
        return;

    trace_virtio_notify(vdev, vq);
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    virtio_notify_vector(vdev, vdev->config_vector);
}
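
/* Migration stream layout: optional binding config, then status, isr,
 * queue_sel, guest_features and the config blob, followed by the number of
 * active queues and, per queue, its size, ring address and last_avail_idx
 * plus optional per-queue binding state. */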
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    int i;

    if (vdev->binding->save_config)
        vdev->binding->save_config(vdev->binding_opaque, f);

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->guest_features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        qemu_put_be64(f, vdev->vq[i].pa);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (vdev->binding->save_queue)
            vdev->binding->save_queue(vdev->binding_opaque, i, f);
    }
}

int virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int num, i, ret;
    uint32_t features;
    uint32_t supported_features =
        vdev->binding->get_features(vdev->binding_opaque);

    if (vdev->binding->load_config) {
        ret = vdev->binding->load_config(vdev->binding_opaque, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    qemu_get_be32s(f, &features);
    if (features & ~supported_features) {
        error_report("Features 0x%x unsupported. Allowed features: 0x%x",
                     features, supported_features);
        return -1;
    }
    if (vdev->set_features)
        vdev->set_features(vdev, features);
    vdev->guest_features = features;
    vdev->config_len = qemu_get_be32(f);
    qemu_get_buffer(f, vdev->config, vdev->config_len);

    num = qemu_get_be32(f);

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);

        if (vdev->vq[i].pa) {
            uint16_t nheads;
            virtqueue_init(&vdev->vq[i]);
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing very strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x\n",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x\n",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (vdev->binding->load_queue) {
            ret = vdev->binding->load_queue(vdev->binding_opaque, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
    return 0;
}
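
/* Ordering note: on VM start the restored device status is re-applied
 * before the binding is told to resume, and on stop the binding is
 * quiesced first, so the backend never runs against stale device state. */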
void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    if (vdev->config)
        qemu_free(vdev->config);
    qemu_free(vdev->vq);
}

static void virtio_vmstate_change(void *opaque, int running, int reason)
{
    VirtIODevice *vdev = opaque;
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (vdev->binding->vmstate_change) {
        vdev->binding->vmstate_change(vdev->binding_opaque, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}
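
/* Allocate and zero the common device state.  struct_size is the size of
 * the device's own structure, which embeds VirtIODevice as its first
 * member; e.g. a block device would make a call along the lines of
 *
 *     virtio_common_init("virtio-blk", VIRTIO_ID_BLOCK,
 *                        sizeof(struct virtio_blk_config),
 *                        sizeof(VirtIOBlock));
 *
 * (illustrative only; the real call lives in the device's own file). */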
VirtIODevice *virtio_common_init(const char *name, uint16_t device_id,
                                 size_t config_size, size_t struct_size)
{
    VirtIODevice *vdev;
    int i;

    vdev = qemu_mallocz(struct_size);

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = qemu_mallocz(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len)
        vdev->config = qemu_mallocz(config_size);
    else
        vdev->config = NULL;

    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change, vdev);

    return vdev;
}

void virtio_bind_device(VirtIODevice *vdev, const VirtIOBindings *binding,
                        void *opaque)
{
    vdev->binding = binding;
    vdev->binding_opaque = opaque;
}
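
/* Raw ring geometry and notifier accessors, for bindings that hand the
 * rings to an external consumer (e.g. vhost) or wire up eventfds. */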
target_phys_addr_t virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

target_phys_addr_t virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

target_phys_addr_t virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

target_phys_addr_t virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

target_phys_addr_t virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

target_phys_addr_t virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingAvail, ring) +
        sizeof(uint64_t) * vdev->vq[n].vring.num;
}

target_phys_addr_t virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

target_phys_addr_t virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}