Statistics
| Branch: | Revision:

root / hw / virtio.c @ bb61564c

History | View | Annotate | Download (18.9 kB)

1
/*
2
 * Virtio Support
3
 *
4
 * Copyright IBM, Corp. 2007
5
 *
6
 * Authors:
7
 *  Anthony Liguori   <aliguori@us.ibm.com>
8
 *
9
 * This work is licensed under the terms of the GNU GPL, version 2.  See
10
 * the COPYING file in the top-level directory.
11
 *
12
 */
13

    
14
#include <inttypes.h>
15

    
16
#include "virtio.h"
17
#include "sysemu.h"
18

    
19
/* The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN         4096

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 * In any case, we must prevent the compiler from reordering the code.
 * TODO: we likely need some rmb()/mb() as well.
 */

/* Compiler-only barrier: prevents the compiler from reordering memory
 * accesses across it, but emits no CPU fence instruction. */
#define wmb() __asm__ __volatile__("": : :"memory")
31

    
32
/* Guest-physical layout of one vring descriptor (virtio ring ABI:
 * field order and widths are fixed by the spec, do not reorder). */
typedef struct VRingDesc
{
    uint64_t addr;   /* guest-physical address of the buffer */
    uint32_t len;    /* buffer length in bytes */
    uint16_t flags;  /* VRING_DESC_F_* bits (NEXT, WRITE, INDIRECT) */
    uint16_t next;   /* index of the chained descriptor when F_NEXT is set */
} VRingDesc;
39

    
40
/* Guest-written "available" ring header; ring[] holds descriptor head
 * indices and its length is the queue size, known only at run time.
 * Only ever used via offsetof() to compute guest-physical offsets. */
typedef struct VRingAvail
{
    uint16_t flags;  /* VRING_AVAIL_F_NO_INTERRUPT etc. */
    uint16_t idx;    /* next free slot the guest will fill */
    uint16_t ring[]; /* C99 flexible array member (was GNU-extension [0]) */
} VRingAvail;
46

    
47
/* One entry of the "used" ring, written by the device side. */
typedef struct VRingUsedElem
{
    uint32_t id;    /* head index of the completed descriptor chain */
    uint32_t len;   /* total bytes written into the chain's in-buffers */
} VRingUsedElem;
52

    
53
typedef struct VRingUsed
54
{
55
    uint16_t flags;
56
    uint16_t idx;
57
    VRingUsedElem ring[0];
58
} VRingUsed;
59

    
60
/* Host-side bookkeeping for one vring: queue size plus the derived
 * guest-physical addresses of its three regions (see virtqueue_init). */
typedef struct VRing
{
    unsigned int num;           /* number of descriptors in the ring */
    target_phys_addr_t desc;    /* guest-physical address of the descriptor table */
    target_phys_addr_t avail;   /* guest-physical address of the avail ring */
    target_phys_addr_t used;    /* guest-physical address of the used ring */
} VRing;
67

    
68
/* Run-time state of one virtqueue. */
struct VirtQueue
{
    VRing vring;                /* ring geometry and guest addresses */
    target_phys_addr_t pa;      /* base guest-physical address set by the driver */
    uint16_t last_avail_idx;    /* next avail-ring index we will consume */
    int inuse;                  /* elements popped but not yet pushed back */
    uint16_t vector;            /* MSI-X vector, or VIRTIO_NO_VECTOR */
    /* Device callback invoked when the guest kicks this queue. */
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
};
77

    
78
/* virt queue functions */
79
/* virt queue functions */
/* Derive the desc/avail/used guest-physical addresses from the base
 * address vq->pa and the queue size, per the legacy virtio layout:
 * the used ring starts at the next VIRTIO_PCI_VRING_ALIGN boundary
 * after the avail ring. */
static void virtqueue_init(VirtQueue *vq)
{
    target_phys_addr_t pa = vq->pa;

    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 VIRTIO_PCI_VRING_ALIGN);
}
89

    
90
/* Read the 64-bit buffer address of descriptor i from guest memory. */
static inline uint64_t vring_desc_addr(target_phys_addr_t desc_pa, int i)
{
    return ldq_phys(desc_pa + sizeof(VRingDesc) * i +
                    offsetof(VRingDesc, addr));
}
96

    
97
/* Read the 32-bit buffer length of descriptor i from guest memory. */
static inline uint32_t vring_desc_len(target_phys_addr_t desc_pa, int i)
{
    return ldl_phys(desc_pa + sizeof(VRingDesc) * i +
                    offsetof(VRingDesc, len));
}
103

    
104
/* Read the flags word of descriptor i from guest memory. */
static inline uint16_t vring_desc_flags(target_phys_addr_t desc_pa, int i)
{
    return lduw_phys(desc_pa + sizeof(VRingDesc) * i +
                     offsetof(VRingDesc, flags));
}
110

    
111
/* Read the "next" chain index of descriptor i from guest memory. */
static inline uint16_t vring_desc_next(target_phys_addr_t desc_pa, int i)
{
    return lduw_phys(desc_pa + sizeof(VRingDesc) * i +
                     offsetof(VRingDesc, next));
}
117

    
118
/* Read the guest-written avail-ring flags word. */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    return lduw_phys(vq->vring.avail + offsetof(VRingAvail, flags));
}
124

    
125
/* Read the guest-written avail-ring index. */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    return lduw_phys(vq->vring.avail + offsetof(VRingAvail, idx));
}
131

    
132
/* Read entry i of the avail ring (a descriptor head index). */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    return lduw_phys(vq->vring.avail + offsetof(VRingAvail, ring[i]));
}
138

    
139
/* Write the "id" field of used-ring entry i into guest memory. */
static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    stl_phys(vq->vring.used + offsetof(VRingUsed, ring[i].id), val);
}
145

    
146
/* Write the "len" field of used-ring entry i into guest memory. */
static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    stl_phys(vq->vring.used + offsetof(VRingUsed, ring[i].len), val);
}
152

    
153
/* Read back the device-written used-ring index from guest memory. */
static uint16_t vring_used_idx(VirtQueue *vq)
{
    return lduw_phys(vq->vring.used + offsetof(VRingUsed, idx));
}
159

    
160
/* Advance the used-ring index by val (uint16_t wrap-around is intended). */
static inline void vring_used_idx_increment(VirtQueue *vq, uint16_t val)
{
    target_phys_addr_t idx_pa = vq->vring.used + offsetof(VRingUsed, idx);

    stw_phys(idx_pa, vring_used_idx(vq) + val);
}
166

    
167
/* OR mask into the used-ring flags word (read-modify-write in guest memory). */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t flags_pa = vq->vring.used + offsetof(VRingUsed, flags);

    stw_phys(flags_pa, lduw_phys(flags_pa) | mask);
}
173

    
174
/* Clear mask bits in the used-ring flags word (read-modify-write). */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t flags_pa = vq->vring.used + offsetof(VRingUsed, flags);

    stw_phys(flags_pa, lduw_phys(flags_pa) & ~mask);
}
180

    
181
/* Tell the guest whether it should kick us on this queue: clearing
 * VRING_USED_F_NO_NOTIFY enables notifications, setting it suppresses
 * them. */
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
}
188

    
189
/* A queue is "ready" once the driver has programmed its base address
 * (virtqueue_init then derives a non-zero avail address). */
int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}
193

    
194
/* True when we have consumed every buffer the guest has published. */
int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}
198

    
199
/* Unmap the element's buffers and record its completion in used-ring
 * slot (used_idx + idx), without publishing it yet: the used index is
 * only advanced by virtqueue_flush().  len is the total number of
 * bytes the device wrote into the in-buffers. */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    /* Unmap writable (device->guest) buffers; access_len for each is the
     * portion of 'len' that landed in this iovec, so only dirtied pages
     * get marked. */
    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += elem->in_sg[i].iov_len;
    }

    /* Read-only (guest->device) buffers were not modified. */
    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Get a pointer to the next entry in the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}
227

    
228
/* Publish 'count' previously filled used-ring entries to the guest by
 * advancing the used index, and release them from the inuse count. */
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    /* Make sure buffer is written before we update index. */
    wmb();
    vring_used_idx_increment(vq, count);
    vq->inuse -= count;
}
235

    
236
/* Convenience wrapper: complete a single element (fill slot 0 relative
 * to the used index, then publish it immediately). */
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}
242

    
243
/* Number of buffers the guest has made available beyond index idx.
 * Exits if the guest advertises more heads than the ring can hold
 * (corrupted or hostile avail index). */
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    /* uint16_t arithmetic: avail indices wrap modulo 65536 by design. */
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        /* Fix: error message was missing its trailing newline. */
        fprintf(stderr, "Guest moved used index from %u to %u\n",
                idx, vring_avail_idx(vq));
        exit(1);
    }

    return num_heads;
}
256

    
257
/* Fetch the descriptor head index the guest published at avail-ring
 * slot idx.  Exits if the index is outside the descriptor table. */
static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        /* Fix: error message was missing its trailing newline. */
        fprintf(stderr, "Guest says index %u is available\n", head);
        exit(1);
    }

    return head;
}
273

    
274
/* Follow the descriptor chain one step: return the next descriptor
 * index, or 'max' (one past the table) when the chain ends.  Exits if
 * the guest chains past the end of the table. */
static unsigned virtqueue_next_desc(target_phys_addr_t desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT))
        return max;

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    wmb();

    if (next >= max) {
        /* Fix: error message was missing its trailing newline. */
        fprintf(stderr, "Desc next is %u\n", next);
        exit(1);
    }

    return next;
}
295

    
296
/* Walk the avail ring without consuming anything and report whether at
 * least in_bytes of guest-writable space and/or out_bytes of
 * guest-readable data are queued.  Returns 1 as soon as either
 * requested amount is met, 0 otherwise.  Exits on a malformed ring. */
int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
{
    unsigned int idx;
    int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        unsigned int max, num_bufs, indirect = 0;
        target_phys_addr_t desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
                fprintf(stderr, "Invalid size for indirect buffer table\n");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                fprintf(stderr, "Looped descriptor\n");
                exit(1);
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
            /* Bug fix: read the indirect table address from descriptor i
             * BEFORE resetting i to 0 (as virtqueue_pop does).  The old
             * code zeroed i first and so always read descriptor 0's
             * address instead of the head descriptor's. */
            desc_pa = vring_desc_addr(desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                fprintf(stderr, "Looped descriptor\n");
                exit(1);
            }

            /* WRITE descriptors count toward device-writable (in) space,
             * the rest toward device-readable (out) data. */
            if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
                if (in_bytes > 0 &&
                    (in_total += vring_desc_len(desc_pa, i)) >= in_bytes)
                    return 1;
            } else {
                if (out_bytes > 0 &&
                    (out_total += vring_desc_len(desc_pa, i)) >= out_bytes)
                    return 1;
            }
        } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

        /* An indirect chain counts as a single buffer toward the
         * top-level loop-detection budget. */
        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }

    return 0;
}
359

    
360
/* Consume the next available element: map its descriptor chain into
 * host iovecs (in_sg for device-writable, out_sg for device-readable)
 * and fill *elem.  Returns the number of iovecs, or 0 if the queue is
 * empty.  Exits on a malformed ring or unmappable (MMIO) buffer. */
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    target_phys_addr_t desc_pa = vq->vring.desc;
    target_phys_addr_t len;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are none of either input nor output. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);

    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
            fprintf(stderr, "Invalid size for indirect buffer table\n");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(desc_pa, i);
        i = 0;
    }

    do {
        struct iovec *sg;
        int is_write = 0;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
            is_write = 1;
        } else
            sg = &elem->out_sg[elem->out_num++];

        /* Grab the first descriptor, and check it's OK. */
        sg->iov_len = vring_desc_len(desc_pa, i);
        len = sg->iov_len;

        /* A short mapping (len != iov_len) means the buffer crosses into
         * MMIO or unmapped space, which we cannot hand to the device. */
        sg->iov_base = cpu_physical_memory_map(vring_desc_addr(desc_pa, i),
                                               &len, is_write);

        if (sg->iov_base == NULL || len != sg->iov_len) {
            fprintf(stderr, "virtio: trying to map MMIO memory\n");
            exit(1);
        }

        /* If we've got too many, that implies a descriptor loop.
         * Fix: message was missing its trailing newline.
         * NOTE(review): this check runs after the sg slot is written; for
         * an indirect table with max > VIRTQUEUE_MAX_SIZE the in_sg/out_sg
         * arrays could be overrun before we trip it — confirm the bound on
         * 'max' upstream. */
        if ((elem->in_num + elem->out_num) > max) {
            fprintf(stderr, "Looped descriptor\n");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

    elem->index = head;

    vq->inuse++;

    return elem->in_num + elem->out_num;
}
424

    
425
/* virtio device */
426
/* virtio device */
/* Forward an interrupt/MSI-X vector to the transport binding, if the
 * binding implements notification. */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    if (vdev->binding->notify == NULL) {
        return;
    }
    vdev->binding->notify(vdev->binding_opaque, vector);
}
432

    
433
/* Re-evaluate/refresh the interrupt line without targeting a specific
 * MSI-X vector. */
void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}
437

    
438
void virtio_reset(void *opaque)
439
{
440
    VirtIODevice *vdev = opaque;
441
    int i;
442

    
443
    if (vdev->reset)
444
        vdev->reset(vdev);
445

    
446
    vdev->guest_features = 0;
447
    vdev->queue_sel = 0;
448
    vdev->status = 0;
449
    vdev->isr = 0;
450
    vdev->config_vector = VIRTIO_NO_VECTOR;
451
    virtio_notify_vector(vdev, vdev->config_vector);
452

    
453
    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
454
        vdev->vq[i].vring.desc = 0;
455
        vdev->vq[i].vring.avail = 0;
456
        vdev->vq[i].vring.used = 0;
457
        vdev->vq[i].last_avail_idx = 0;
458
        vdev->vq[i].pa = 0;
459
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
460
    }
461
}
462

    
463
/* Read one byte from config space at addr; returns (uint32_t)-1 for an
 * out-of-range address. */
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    uint8_t val;

    /* Refresh config contents from the device before reading. */
    vdev->get_config(vdev, vdev->config);

    /* Fix: the old check "addr > config_len - sizeof(val)" underflowed
     * when config_len < sizeof(val); this form has no subtraction that
     * can wrap. */
    if (addr > vdev->config_len ||
        vdev->config_len - addr < sizeof(val))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}
475

    
476
/* Read a 16-bit word from config space at addr; returns (uint32_t)-1
 * for an out-of-range address. */
uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    uint16_t val;

    /* Refresh config contents from the device before reading. */
    vdev->get_config(vdev, vdev->config);

    /* Fix: underflow-safe bounds check (the old subtraction wrapped when
     * config_len < sizeof(val)). */
    if (addr > vdev->config_len ||
        vdev->config_len - addr < sizeof(val))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}
488

    
489
/* Read a 32-bit word from config space at addr; returns (uint32_t)-1
 * for an out-of-range address (ambiguous with a stored 0xffffffff, as
 * in the original). */
uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    uint32_t val;

    /* Refresh config contents from the device before reading. */
    vdev->get_config(vdev, vdev->config);

    /* Fix: underflow-safe bounds check (the old subtraction wrapped when
     * config_len < sizeof(val)). */
    if (addr > vdev->config_len ||
        vdev->config_len - addr < sizeof(val))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}
501

    
502
/* Write one byte into config space at addr; out-of-range writes are
 * silently ignored.  Notifies the device via set_config. */
void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint8_t val = data;

    /* Fix: underflow-safe bounds check (the old subtraction wrapped when
     * config_len < sizeof(val)). */
    if (addr > vdev->config_len ||
        vdev->config_len - addr < sizeof(val))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}
514

    
515
/* Write a 16-bit word into config space at addr; out-of-range writes
 * are silently ignored.  Notifies the device via set_config. */
void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint16_t val = data;

    /* Fix: underflow-safe bounds check (the old subtraction wrapped when
     * config_len < sizeof(val)). */
    if (addr > vdev->config_len ||
        vdev->config_len - addr < sizeof(val))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}
527

    
528
/* Write a 32-bit word into config space at addr; out-of-range writes
 * are silently ignored.  Notifies the device via set_config. */
void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint32_t val = data;

    /* Fix: underflow-safe bounds check (the old subtraction wrapped when
     * config_len < sizeof(val)). */
    if (addr > vdev->config_len ||
        vdev->config_len - addr < sizeof(val))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}
540

    
541
/* Driver programmed queue n's base address: store it and recompute the
 * desc/avail/used layout. */
void virtio_queue_set_addr(VirtIODevice *vdev, int n, target_phys_addr_t addr)
{
    vdev->vq[n].pa = addr;
    virtqueue_init(&vdev->vq[n]);
}
546

    
547
/* Return queue n's programmed base guest-physical address (0 if unset). */
target_phys_addr_t virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].pa;
}
551

    
552
/* Return queue n's size in descriptors (0 if the slot is unused). */
int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}
556

    
557
/* Guest kicked queue n: invoke its handler if the queue exists and has
 * been set up. */
void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    /* Fix: also reject negative n — the old "n < VIRTIO_PCI_QUEUE_MAX"
     * check let a negative (guest-influenced) index read out of bounds. */
    if (n < 0 || n >= VIRTIO_PCI_QUEUE_MAX || !vdev->vq[n].vring.desc) {
        return;
    }
    vdev->vq[n].handle_output(vdev, &vdev->vq[n]);
}
563

    
564
/* Return queue n's MSI-X vector, or VIRTIO_NO_VECTOR when n is out of
 * range. */
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    if (n < VIRTIO_PCI_QUEUE_MAX) {
        return vdev->vq[n].vector;
    }
    return VIRTIO_NO_VECTOR;
}
569

    
570
/* Assign an MSI-X vector to queue n; out-of-range n is ignored. */
void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    if (n < VIRTIO_PCI_QUEUE_MAX)
        vdev->vq[n].vector = vector;
}
575

    
576
/* Register a new virtqueue of queue_size descriptors with the given
 * kick handler and return it.  Aborts if all slots are taken or the
 * size exceeds VIRTQUEUE_MAX_SIZE (programming error). */
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    /* A slot with vring.num == 0 is free. */
    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) {
        abort();
    }

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}
594

    
595
/* Raise the queue's interrupt unless the guest suppressed it.
 * Suppression (VRING_AVAIL_F_NO_INTERRUPT) is honoured except when
 * NOTIFY_ON_EMPTY was negotiated and the queue has just gone empty, in
 * which case we notify anyway. */
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    /* Always notify when queue is empty (when feature acknowledge) */
    if ((vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT) &&
        (!(vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) ||
         (vq->inuse || vring_avail_idx(vq) != vq->last_avail_idx)))
        return;

    /* Bit 0 of ISR = queue interrupt pending. */
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}
606

    
607
/* Raise a configuration-change interrupt, but only once the driver has
 * declared itself ready (DRIVER_OK). */
void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    /* Bits 0|1: queue interrupt + config-change interrupt pending. */
    vdev->isr |= 0x03;
    virtio_notify_vector(vdev, vdev->config_vector);
}
615

    
616
/* Serialize device state for migration/savevm: transport config first,
 * then common fields, config space, the number of active queues, and
 * per-queue state.  Must stay in sync with virtio_load. */
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    int i;

    if (vdev->binding->save_config)
        vdev->binding->save_config(vdev->binding_opaque, f);

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->guest_features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    /* Count active queues (slots are allocated contiguously, so the
     * first empty one terminates the list). */
    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        qemu_put_be64(f, vdev->vq[i].pa);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (vdev->binding->save_queue)
            vdev->binding->save_queue(vdev->binding_opaque, i, f);
    }
}
648

    
649
/* Restore device state written by virtio_save.  Returns 0 on success
 * or a negative error from the binding callbacks; rejects streams that
 * claim features this binding does not support. */
int virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int num, i, ret;
    uint32_t features;
    uint32_t supported_features =
        vdev->binding->get_features(vdev->binding_opaque);

    if (vdev->binding->load_config) {
        ret = vdev->binding->load_config(vdev->binding_opaque, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    qemu_get_be32s(f, &features);
    if (features & ~supported_features) {
        fprintf(stderr, "Features 0x%x unsupported. Allowed features: 0x%x\n",
                features, supported_features);
        return -1;
    }
    vdev->guest_features = features;
    /* NOTE(review): config_len comes from the stream and is used to fill
     * the preallocated vdev->config without a size check — confirm the
     * stream is trusted or bound it against the allocated length. */
    vdev->config_len = qemu_get_be32(f);
    qemu_get_buffer(f, vdev->config, vdev->config_len);

    /* NOTE(review): num is not clamped to VIRTIO_PCI_QUEUE_MAX; a corrupt
     * stream could overrun vq[] below — verify against callers. */
    num = qemu_get_be32(f);

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);

        /* Recompute desc/avail/used addresses for configured queues. */
        if (vdev->vq[i].pa) {
            virtqueue_init(&vdev->vq[i]);
        }
        if (vdev->binding->load_queue) {
            ret = vdev->binding->load_queue(vdev->binding_opaque, i, f);
            if (ret)
                return ret;
        }
    }

    /* Refresh the interrupt line to match the restored ISR state. */
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
    return 0;
}
695

    
696
/* Release memory owned by the common layer (config space and the queue
 * array).  The VirtIODevice itself is owned by the caller. */
void virtio_cleanup(VirtIODevice *vdev)
{
    /* qemu_free(NULL) is a no-op (wraps free), so the old NULL guard on
     * config was redundant. */
    qemu_free(vdev->config);
    qemu_free(vdev->vq);
}
702

    
703
VirtIODevice *virtio_common_init(const char *name, uint16_t device_id,
704
                                 size_t config_size, size_t struct_size)
705
{
706
    VirtIODevice *vdev;
707
    int i;
708

    
709
    vdev = qemu_mallocz(struct_size);
710

    
711
    vdev->device_id = device_id;
712
    vdev->status = 0;
713
    vdev->isr = 0;
714
    vdev->queue_sel = 0;
715
    vdev->config_vector = VIRTIO_NO_VECTOR;
716
    vdev->vq = qemu_mallocz(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
717
    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++)
718
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
719

    
720
    vdev->name = name;
721
    vdev->config_len = config_size;
722
    if (vdev->config_len)
723
        vdev->config = qemu_mallocz(config_size);
724
    else
725
        vdev->config = NULL;
726

    
727
    return vdev;
728
}
729

    
730
/* Attach the transport binding (PCI, syborg, ...) and its opaque state
 * to the device; the binding callbacks are used for notify/save/load. */
void virtio_bind_device(VirtIODevice *vdev, const VirtIOBindings *binding,
                        void *opaque)
{
    vdev->binding = binding;
    vdev->binding_opaque = opaque;
}
}