/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <inttypes.h>

#include "trace.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"

/* The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN         4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
} VRing;

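/* Guest-physical layout that virtqueue_init() below derives from a single
 * base address (legacy virtio ring; the trailing event-idx words are only
 * meaningful when VIRTIO_RING_F_EVENT_IDX has been negotiated):
 *
 *     desc:  VRingDesc[num]                        (16 bytes per entry)
 *     avail: flags, idx, ring[num], used_event     (all 16-bit words)
 *     ...... padding up to VIRTIO_PCI_VRING_ALIGN ......
 *     used:  flags, idx, VRingUsedElem[num], avail_event
 */
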
struct VirtQueue
{
    VRing vring;
    hwaddr pa;
    uint16_t last_avail_idx;
    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid or stale */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    int inuse;

    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
};

/* virt queue functions */
static void virtqueue_init(VirtQueue *vq)
{
    hwaddr pa = vq->pa;

    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 VIRTIO_PCI_VRING_ALIGN);
}

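/* Worked example: for num = 256, the descriptor table occupies
 * 256 * sizeof(VRingDesc) = 4096 bytes, so avail starts at pa + 4096;
 * offsetof(VRingAvail, ring[256]) = 4 + 2 * 256 = 516 bytes, and rounding
 * pa + 4096 + 516 up to VIRTIO_PCI_VRING_ALIGN places used at pa + 8192. */
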
static inline uint64_t vring_desc_addr(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(pa);
}

static inline uint32_t vring_desc_len(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(pa);
}

static inline uint16_t vring_desc_flags(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_desc_next(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(pa);
}

static inline uint16_t vring_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(pa);
}

static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(pa, val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) & ~mask);
}

static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    if (!vq->notification) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    stw_phys(pa, val);
}

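/* The two event-idx words negotiated with VIRTIO_RING_F_EVENT_IDX have no
 * named struct field: used_event is the uint16_t directly after the avail
 * ring (read by vring_used_event() above), and avail_event is the uint16_t
 * directly after the used ring (written by vring_avail_event()). */
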
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;
    if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
        vring_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}

void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    trace_virtqueue_fill(vq, elem, len, idx);

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Get a pointer to the next entry in the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;
    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vring_used_idx(vq);
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

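/* virtqueue_push() is the common single-element case: fill one used-ring
 * entry at relative offset 0, then flush it. A device completing several
 * elements at once can instead call virtqueue_fill() with idx 0..n-1 and
 * publish them all with a single virtqueue_flush(vq, n). */
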
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        error_report("Guest moved used index from %u to %u",
                     idx, vring_avail_idx(vq));
        exit(1);
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        error_report("Guest says index %u is available", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(hwaddr desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT))
        return max;

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (next >= max) {
        error_report("Desc next is %u", next);
        exit(1);
    }

    return next;
}

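/* Both virtqueue_get_avail_bytes() and virtqueue_pop() below handle
 * VRING_DESC_F_INDIRECT: the flagged descriptor's buffer is itself a table
 * of VRingDesc entries, so the walk restarts at that table with
 * max = len / sizeof(VRingDesc) possible entries instead of vring.num. */
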
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        unsigned int max, num_bufs, indirect = 0;
        hwaddr desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
                error_report("Invalid size for indirect buffer table");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                error_report("Looped descriptor");
                exit(1);
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
            desc_pa = vring_desc_addr(desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                error_report("Looped descriptor");
                exit(1);
            }

            if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
                in_total += vring_desc_len(desc_pa, i);
            } else {
                out_total += vring_desc_len(desc_pa, i);
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
        } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}

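/* A device that wants to start a request only once the guest has queued
 * enough buffer space can ask first, e.g. (a sketch; "resp" is a
 * hypothetical response struct):
 *
 *     if (virtqueue_avail_bytes(vq, sizeof(resp), 0)) {
 *         // at least sizeof(resp) bytes of guest-writable space is queued
 *     }
 */
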
void virtqueue_map_sg(struct iovec *sg, hwaddr *addr,
                      size_t num_sg, int is_write)
{
    unsigned int i;
    hwaddr len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
            error_report("virtio: trying to map MMIO memory");
            exit(1);
        }
    }
}

int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    hwaddr desc_pa = vq->vring.desc;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are neither input nor output buffers. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
        vring_avail_event(vq, vring_avail_idx(vq));
    }

    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
            error_report("Invalid size for indirect buffer table");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(desc_pa, i);
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        struct iovec *sg;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) {
                error_report("Too many write descriptors in indirect table");
                exit(1);
            }
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
        } else {
            if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) {
                error_report("Too many read descriptors in indirect table");
                exit(1);
            }
            elem->out_addr[elem->out_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->out_sg[elem->out_num++];
        }

        sg->iov_len = vring_desc_len(desc_pa, i);

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            error_report("Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

    /* Now map what we have collected */
    virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);

    elem->index = head;

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem->in_num + elem->out_num;
}

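/* A sketch of typical device-side use (not a function in this file;
 * bytes_written is hypothetical and counts only what was written into
 * elem.in_sg):
 *
 *     VirtQueueElement elem;
 *
 *     while (virtqueue_pop(vq, &elem)) {
 *         // read the request from elem.out_sg, write any response
 *         // into elem.in_sg, then return the element to the guest
 *         virtqueue_push(vq, &elem, bytes_written);
 *     }
 *     virtio_notify(vdev, vq);
 */
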
/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    if (vdev->binding->notify) {
        vdev->binding->notify(vdev->binding_opaque, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

void virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    trace_virtio_set_status(vdev, val);

    if (vdev->set_status) {
        vdev->set_status(vdev, val);
    }
    vdev->status = val;
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    virtio_set_status(vdev, 0);

    if (vdev->reset)
        vdev->reset(vdev);

    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pa = 0;
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
    }
}

uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    uint8_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    uint16_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    uint32_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint8_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    stb_p(vdev->config + addr, val);

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint16_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    stw_p(vdev->config + addr, val);

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint32_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    stl_p(vdev->config + addr, val);

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

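/* The get_config/set_config hooks let a device regenerate or consume the
 * config-space buffer around each access. A minimal sketch (the device type
 * and field names are hypothetical):
 *
 *     static void foo_get_config(VirtIODevice *vdev, uint8_t *config)
 *     {
 *         struct virtio_foo_config cfg = { .num_queues = 1 };
 *
 *         memcpy(config, &cfg, sizeof(cfg));
 *     }
 */
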
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].pa = addr;
    virtqueue_init(&vdev->vq[n]);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].pa;
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_queue_get_id(VirtQueue *vq)
{
    VirtIODevice *vdev = vq->vdev;
    assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_PCI_QUEUE_MAX]);
    return vq - &vdev->vq[0];
}

void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc) {
        VirtIODevice *vdev = vq->vdev;
        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    if (n < VIRTIO_PCI_QUEUE_MAX)
        vdev->vq[n].vector = vector;
}

VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}

void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_PCI_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
}

void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}

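/* Devices allocate their queues during init, e.g. (a sketch with a
 * hypothetical handler name):
 *
 *     vq = virtio_add_queue(vdev, 128, handle_foo_output);
 *
 * handle_foo_output() then runs whenever the guest kicks that queue. */
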
/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event? */
static inline int vring_need_event(uint16_t event, uint16_t new, uint16_t old)
{
    /* Note: Xen has similar logic for notification hold-off
     * in include/xen/interface/io/ring.h with req_event and req_prod
     * corresponding to event_idx + 1 and new respectively.
     * Note also that req_event and req_prod in Xen start at 1,
     * event indexes in virtio start at 0. */
    return (uint16_t)(new - event - 1) < (uint16_t)(new - old);
}

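/* Worked example: with old = 5 and new = 6, an event of 5 means the index
 * just moved past the driver's event point: (uint16_t)(6 - 5 - 1) = 0 < 1,
 * so notify. With event = 10 the driver is not yet interested:
 * (uint16_t)(6 - 10 - 1) = 0xfffb, which is not < 1, so the notification
 * is suppressed. The uint16_t casts keep both comparisons correct across
 * index wraparound. */
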
static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (if the feature was acknowledged) */
    if (((vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) &&
         !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx)) {
        return true;
    }

    if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vring_used_idx(vq);
    return !v || vring_need_event(vring_used_event(vq), new, old);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!vring_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    virtio_notify_vector(vdev, vdev->config_vector);
}

void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    int i;

    if (vdev->binding->save_config)
        vdev->binding->save_config(vdev->binding_opaque, f);

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->guest_features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        qemu_put_be64(f, vdev->vq[i].pa);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (vdev->binding->save_queue)
            vdev->binding->save_queue(vdev->binding_opaque, i, f);
    }
}

int virtio_set_features(VirtIODevice *vdev, uint32_t val)
{
    uint32_t supported_features =
        vdev->binding->get_features(vdev->binding_opaque);
    bool bad = (val & ~supported_features) != 0;

    val &= supported_features;
    if (vdev->set_features) {
        vdev->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int num, i, ret;
    uint32_t features;
    uint32_t supported_features;

    if (vdev->binding->load_config) {
        ret = vdev->binding->load_config(vdev->binding_opaque, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    qemu_get_be32s(f, &features);

    if (virtio_set_features(vdev, features) < 0) {
        supported_features = vdev->binding->get_features(vdev->binding_opaque);
        error_report("Features 0x%x unsupported. Allowed features: 0x%x",
                     features, supported_features);
        return -1;
    }
    vdev->config_len = qemu_get_be32(f);
    qemu_get_buffer(f, vdev->config, vdev->config_len);

    num = qemu_get_be32(f);

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].pa) {
            uint16_t nheads;
            virtqueue_init(&vdev->vq[i]);
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing very strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (vdev->binding->load_queue) {
            ret = vdev->binding->load_queue(vdev->binding_opaque, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
    return 0;
}

void virtio_common_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    g_free(vdev->config);
    g_free(vdev->vq);
}

void virtio_cleanup(VirtIODevice *vdev)
{
    virtio_common_cleanup(vdev);
    g_free(vdev);
}

static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (vdev->binding->vmstate_change) {
        vdev->binding->vmstate_change(vdev->binding_opaque, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}

void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    int i;
    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
}

VirtIODevice *virtio_common_init(const char *name, uint16_t device_id,
                                 size_t config_size, size_t struct_size)
{
    VirtIODevice *vdev;
    vdev = g_malloc0(struct_size);
    virtio_init(vdev, name, device_id, config_size);
    return vdev;
}

void virtio_bind_device(VirtIODevice *vdev, const VirtIOBindings *binding,
                        DeviceState *opaque)
{
    vdev->binding = binding;
    vdev->binding_opaque = opaque;
}

hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}

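/* The two EventNotifiers wired up below point in opposite directions:
 * the guest notifier fires when the device has work for the guest
 * (delivering the interrupt via virtio_irq()), while the host notifier
 * fires when the guest kicks the queue (running the handler via
 * virtio_queue_notify_vq()). */
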
static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

static void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
                                               bool set_handler)
{
    if (assign && set_handler) {
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    } else {
        event_notifier_set_handler(&vq->host_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier after disabling the event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_read(&vq->host_notifier);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}

static int virtio_device_init(DeviceState *qdev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(qdev);
    assert(k->init != NULL);
    if (k->init(vdev) < 0) {
        return -1;
    }
    virtio_bus_plug_device(vdev);
    return 0;
}

static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->init = virtio_device_init;
    dc->bus_type = TYPE_VIRTIO_BUS;
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)