/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <inttypes.h>

#include "trace.h"
#include "virtio.h"
#include "sysemu.h"

/* The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN         4096

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 * In any case, we must prevent the compiler from reordering the code.
 * TODO: we likely need some rmb()/mb() as well.
 */

#define wmb() __asm__ __volatile__("": : :"memory")

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    target_phys_addr_t desc;
    target_phys_addr_t avail;
    target_phys_addr_t used;
} VRing;

struct VirtQueue
{
    VRing vring;
    target_phys_addr_t pa;
    uint16_t last_avail_idx;
    int inuse;
    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
};

/* virt queue functions */
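/* Layout sketch of a vring as computed by virtqueue_init() below (a worked
 * example, assuming the legacy split-ring layout used in this file):
 *
 *   desc:  pa                        num * sizeof(VRingDesc) (16 bytes each)
 *   avail: pa + num * 16             4 + num * 2 bytes
 *   used:  next 4096-byte boundary   4 + num * 8 bytes
 *
 * e.g. for num = 256: desc at pa, avail at pa + 4096 ending at pa + 4612,
 * so used starts at pa + 8192 after alignment. */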
static void virtqueue_init(VirtQueue *vq)
{
    target_phys_addr_t pa = vq->pa;

    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 VIRTIO_PCI_VRING_ALIGN);
}

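/* The helpers below read and write the rings directly in guest physical
 * memory through the ld*_phys()/st*_phys() accessors, so every access goes
 * through the physical-memory layer and uses target byte order. */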
static inline uint64_t vring_desc_addr(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(pa);
}

static inline uint32_t vring_desc_len(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(pa);
}

static inline uint16_t vring_desc_flags(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_desc_next(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(pa);
}

static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(pa);
}

static inline void vring_used_idx_increment(VirtQueue *vq, uint16_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(pa, vring_used_idx(vq) + val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) & ~mask);
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    if (enable)
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    else
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}

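/* Device-side completion happens in two steps: virtqueue_fill() writes a
 * used-ring entry at an offset past the current used index, and
 * virtqueue_flush() publishes `count` such entries at once by bumping the
 * index.  A minimal sketch of the usual sequence (vdev and bytes_written
 * are placeholders; the element comes from virtqueue_pop() below):
 *
 *     VirtQueueElement elem;
 *     while (virtqueue_pop(vq, &elem)) {
 *         ... read elem.out_sg[], write results into elem.in_sg[] ...
 *         virtqueue_push(vq, &elem, bytes_written);
 *     }
 *     virtio_notify(vdev, vq);
 */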
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    trace_virtqueue_fill(vq, elem, len, idx);

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Write the next entry of the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    /* Make sure buffer is written before we update index. */
    wmb();
    trace_virtqueue_flush(vq, count);
    vring_used_idx_increment(vq, count);
    vq->inuse -= count;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

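/* Both indexes are free-running uint16_t values; the subtraction below is
 * taken modulo 2^16, so the head count stays correct across index
 * wraparound as long as the ring has at most 2^16 entries. */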
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        fprintf(stderr, "Guest moved used index from %u to %u\n",
                idx, vring_avail_idx(vq));
        exit(1);
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        fprintf(stderr, "Guest says index %u is available\n", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(target_phys_addr_t desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT))
        return max;

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    wmb();

    if (next >= max) {
        fprintf(stderr, "Desc next is %u\n", next);
        exit(1);
    }

    return next;
}

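/* Answer "could the available buffers satisfy a request for in_bytes of
 * space the device may write and out_bytes of data it may read?" without
 * popping anything: walk every available chain (following indirect tables)
 * and total the device-writable and device-readable descriptor lengths. */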
int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
{
    unsigned int idx;
    int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        unsigned int max, num_bufs, indirect = 0;
        target_phys_addr_t desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
                fprintf(stderr, "Invalid size for indirect buffer table\n");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                fprintf(stderr, "Looped descriptor\n");
                exit(1);
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
            /* Read the table address before resetting i, or we would look
             * at descriptor 0 of the wrong table. */
            desc_pa = vring_desc_addr(desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                fprintf(stderr, "Looped descriptor\n");
                exit(1);
            }

            if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
                if (in_bytes > 0 &&
                    (in_total += vring_desc_len(desc_pa, i)) >= in_bytes)
                    return 1;
            } else {
                if (out_bytes > 0 &&
                    (out_total += vring_desc_len(desc_pa, i)) >= out_bytes)
                    return 1;
            }
        } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }

    return 0;
}

void virtqueue_map_sg(struct iovec *sg, target_phys_addr_t *addr,
    size_t num_sg, int is_write)
{
    unsigned int i;
    target_phys_addr_t len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
            fprintf(stderr, "virtio: trying to map MMIO memory\n");
            exit(1);
        }
    }
}

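/* Pop the next available chain: walk its descriptors (switching to the
 * indirect table when VRING_DESC_F_INDIRECT is set), sort them into
 * device-readable out_sg[] and device-writable in_sg[], then map every
 * segment into host memory.  The element stays "in use" until it is
 * returned through virtqueue_push() or virtqueue_fill()/virtqueue_flush(). */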
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    target_phys_addr_t desc_pa = vq->vring.desc;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are neither input nor output buffers. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);

    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
            fprintf(stderr, "Invalid size for indirect buffer table\n");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(desc_pa, i);
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        struct iovec *sg;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
        } else {
            elem->out_addr[elem->out_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->out_sg[elem->out_num++];
        }

        sg->iov_len = vring_desc_len(desc_pa, i);

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            fprintf(stderr, "Looped descriptor\n");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

    /* Now map what we have collected */
    virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);

    elem->index = head;

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem->in_num + elem->out_num;
}

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    if (vdev->binding->notify) {
        vdev->binding->notify(vdev->binding_opaque, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    if (vdev->reset)
        vdev->reset(vdev);

    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pa = 0;
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
    }
}

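/* Config space accessors.  Reads refresh the device's view first through
 * get_config(); writes update the cached config and then let the device
 * react through set_config().  Every accessor bounds-checks addr against
 * config_len; out-of-range reads return all-ones and out-of-range writes
 * are dropped. */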
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    uint8_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr + sizeof(val) > vdev->config_len)
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    uint16_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr + sizeof(val) > vdev->config_len)
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    uint32_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr + sizeof(val) > vdev->config_len)
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len)
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len)
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len)
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_queue_set_addr(VirtIODevice *vdev, int n, target_phys_addr_t addr)
{
    vdev->vq[n].pa = addr;
    virtqueue_init(&vdev->vq[n]);
}

target_phys_addr_t virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].pa;
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    if (n < VIRTIO_PCI_QUEUE_MAX && vdev->vq[n].vring.desc) {
        trace_virtio_queue_notify(vdev, n, &vdev->vq[n]);
        vdev->vq[n].handle_output(vdev, &vdev->vq[n]);
    }
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    if (n < VIRTIO_PCI_QUEUE_MAX)
        vdev->vq[n].vector = vector;
}

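/* Queue slots are handed out in order: the first vq[] entry whose size is
 * still zero is used, so a device's queue indexes follow the order of its
 * virtio_add_queue() calls. */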
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}

void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    /* Suppress the interrupt if the guest asked for no interrupts, unless
     * it acknowledged VIRTIO_F_NOTIFY_ON_EMPTY and the queue is now empty:
     * in that case we always notify. */
    if ((vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT) &&
        (!(vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) ||
         (vq->inuse || vring_avail_idx(vq) != vq->last_avail_idx)))
        return;

    trace_virtio_notify(vdev, vq);
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    virtio_notify_vector(vdev, vdev->config_vector);
}

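/* Stream layout written by virtio_save() and consumed by virtio_load(),
 * sketched from the code below (multi-byte fields are big-endian):
 *
 *   [binding config]                via binding->save_config, if any
 *   status(1) isr(1) queue_sel(2) guest_features(4)
 *   config_len(4) config[config_len]
 *   nqueues(4)
 *   per queue: num(4) pa(8) last_avail_idx(2) [binding queue state]
 */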
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    int i;

    if (vdev->binding->save_config)
        vdev->binding->save_config(vdev->binding_opaque, f);

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->guest_features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        qemu_put_be64(f, vdev->vq[i].pa);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (vdev->binding->save_queue)
            vdev->binding->save_queue(vdev->binding_opaque, i, f);
    }
}

int virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int num, i, ret;
    uint32_t features;
    uint32_t supported_features =
        vdev->binding->get_features(vdev->binding_opaque);

    if (vdev->binding->load_config) {
        ret = vdev->binding->load_config(vdev->binding_opaque, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    qemu_get_be32s(f, &features);
    if (features & ~supported_features) {
        fprintf(stderr, "Features 0x%x unsupported. Allowed features: 0x%x\n",
                features, supported_features);
        return -1;
    }
    if (vdev->set_features)
        vdev->set_features(vdev, features);
    vdev->guest_features = features;
    /* Note: config_len from the stream is trusted to match the config
     * space allocated in virtio_common_init(). */
    vdev->config_len = qemu_get_be32(f);
    qemu_get_buffer(f, vdev->config, vdev->config_len);

    num = qemu_get_be32(f);

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);

        if (vdev->vq[i].pa) {
            virtqueue_init(&vdev->vq[i]);
        }
        if (vdev->binding->load_queue) {
            ret = vdev->binding->load_queue(vdev->binding_opaque, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
    return 0;
}

void virtio_cleanup(VirtIODevice *vdev)
{
    if (vdev->config)
        qemu_free(vdev->config);
    qemu_free(vdev->vq);
}

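/* Typical device bring-up, sketched from the API in this file (the "foo"
 * names, the ID, the queue size and the handler are all placeholders for a
 * real device):
 *
 *     VirtIOFoo *s = (VirtIOFoo *)virtio_common_init("virtio-foo",
 *                        VIRTIO_ID_FOO, sizeof(struct virtio_foo_config),
 *                        sizeof(VirtIOFoo));
 *     s->vq = virtio_add_queue(&s->vdev, 128, virtio_foo_handle_output);
 *
 * The transport then attaches itself with virtio_bind_device(). */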
VirtIODevice *virtio_common_init(const char *name, uint16_t device_id,
                                 size_t config_size, size_t struct_size)
{
    VirtIODevice *vdev;
    int i;

    vdev = qemu_mallocz(struct_size);

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = qemu_mallocz(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len)
        vdev->config = qemu_mallocz(config_size);
    else
        vdev->config = NULL;

    return vdev;
}

void virtio_bind_device(VirtIODevice *vdev, const VirtIOBindings *binding,
                        void *opaque)
{
    vdev->binding = binding;
    vdev->binding_opaque = opaque;
}

target_phys_addr_t virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

target_phys_addr_t virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

target_phys_addr_t virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

target_phys_addr_t virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

target_phys_addr_t virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

target_phys_addr_t virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    /* The avail ring holds uint16_t descriptor indexes. */
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

target_phys_addr_t virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

target_phys_addr_t virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
            virtio_queue_get_used_size(vdev, n);
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}