hw/vhost.c @ 54dd9321


/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <sys/ioctl.h>
#include "vhost.h"
#include "hw/hw.h"
#include "range.h"
#include <linux/vhost.h>

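/* The vhost kernel backend reports guest writes through a dirty log: an
 * array of vhost_log_chunk_t words in which each bit marks one
 * VHOST_LOG_PAGE-sized page as dirty. This helper walks the chunks
 * covering the intersection of the memory range [mfirst, mlast] and the
 * region/ring range [rfirst, rlast], atomically clears them, and forwards
 * every set bit to QEMU's dirty bitmap. */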
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);
    if (end < start) {
        return;
    }
    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        int bit;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            continue;
        }
        /* Data must be read atomically. We don't really
         * need the barrier semantics of __sync
         * builtins, but it's easier to use them than
         * roll our own. */
        log = __sync_fetch_and_and(from, 0);
        while ((bit = sizeof(log) > sizeof(int) ?
                ffsll(log) : ffs(log))) {
            bit -= 1;
            cpu_physical_memory_set_dirty(addr + bit * VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

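/* CPUPhysMemoryClient callback: flush the vhost dirty log into QEMU's
 * dirty bitmap. Besides guest memory regions, the used rings are synced
 * as well, since the kernel writes them directly, bypassing QEMU. */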
static int vhost_client_sync_dirty_bitmap(CPUPhysMemoryClient *client,
                                          target_phys_addr_t start_addr,
                                          target_phys_addr_t end_addr)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    int i;
    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            assert(to >= 0);
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

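/* The log needs one bit for every VHOST_LOG_PAGE page the kernel can
 * dirty, so its size is derived from the highest guest-physical address
 * covered by a memory region or a used ring, rounded up to whole chunks. */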
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

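/* Switch the kernel to a new log buffer, then sync: any bits still set in
 * the old buffer are flushed to the dirty bitmap before it is freed, so no
 * dirty-page information is lost across the resize. */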
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    vhost_log_chunk_t *log;
    uint64_t log_base;
    int r;
    if (size) {
        log = qemu_mallocz(size * sizeof *log);
    } else {
        log = NULL;
    }
    log_base = (uint64_t)(unsigned long)log;
    r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    vhost_client_sync_dirty_bitmap(&dev->client, 0,
                                   (target_phys_addr_t)~0x0ull);
    if (dev->log) {
        qemu_free(dev->log);
    }
    dev->log = log;
    dev->log_size = size;
}

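/* The ring addresses handed to the kernel were taken from the mappings
 * captured in vhost_virtqueue_init(). If the guest memory layout changes
 * while the device is running, check that each affected ring still maps
 * to the same host address; vhost cannot handle a relocated ring. */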
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        target_phys_addr_t l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            return -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            return -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return 0;
}

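/* CPUPhysMemoryClient callback: track guest memory layout changes in
 * dev->mem and, while the device is running, push the new table to the
 * kernel, growing the dirty log first whenever it must cover more. */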
static void vhost_client_set_memory(CPUPhysMemoryClient *client,
                                    target_phys_addr_t start_addr,
                                    ram_addr_t size,
                                    ram_addr_t phys_offset)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    uint64_t log_size;
    int r;
    dev->mem = qemu_realloc(dev->mem, s);

    assert(size);

    /* Remove old mapping for this memory, if any. */
    vhost_dev_unassign_memory(dev, start_addr, size);
    if (flags == IO_MEM_RAM) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size,
                                (uintptr_t)qemu_get_ram_ptr(phys_offset));
    }

    if (!dev->started) {
        return;
    }

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes of log
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
}

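/* Tell the kernel where a virtqueue lives: the userspace addresses of the
 * descriptor, avail and used areas, plus the guest-physical address of the
 * used ring, which the kernel needs to log its own used-ring writes. */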
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}

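/* Enable or disable dirty logging on a running device: update the feature
 * bits first, then flip the per-ring log flag. On failure, roll every ring
 * and the features back to the current dev->log_enabled state. */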
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

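/* CPUPhysMemoryClient callback: migration code toggles dirty logging
 * here. On a running device this allocates (or frees) the log and
 * reprograms the kernel through vhost_dev_set_log(). */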
static int vhost_client_migration_log(CPUPhysMemoryClient *client,
                                      int enable)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        if (dev->log) {
            qemu_free(dev->log);
        }
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

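/* Hand one virtqueue over to the kernel: program its size and base index,
 * map the descriptor, avail, used and ring areas into QEMU's address
 * space, pass their addresses down, and wire up the kick (host notifier)
 * and call (guest notifier) eventfds. */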
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct VirtIODevice *vdev,
                                struct vhost_virtqueue *vq,
                                unsigned idx)
{
    target_phys_addr_t s, l, a;
    int r;
    struct vhost_vring_file file = {
        .index = idx,
    };
    struct vhost_vring_state state = {
        .index = idx,
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    if (!vdev->binding->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        return -ENOSYS;
    }

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }
    r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, true);
    if (r < 0) {
        fprintf(stderr, "Error binding host notifier: %d\n", -r);
        goto fail_host_notifier;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        goto fail_kick;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        goto fail_call;
    }

    return 0;

fail_call:
fail_kick:
    vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
fail_host_notifier:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

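/* Tear down one virtqueue: detach the host notifier, read back the last
 * avail index with VHOST_GET_VRING_BASE so virtio can resume where the
 * kernel stopped, and unmap the rings (the used ring is unmapped as
 * written, so its pages are marked dirty). */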
static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
                                    struct VirtIODevice *vdev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx,
    };
    int r;
    r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d host cleanup failed: %d\n", idx, r);
        fflush(stderr);
    }
    assert(r >= 0);
    r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    assert(r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

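/* Public API. A caller such as the vhost-net code is expected to drive
 * the device roughly as follows (illustrative sketch only, error
 * handling omitted; field names follow this file):
 *
 *     struct vhost_dev dev;
 *     dev.nvqs = 2;
 *     dev.vqs = vqs;                      // caller-provided array
 *     vhost_dev_init(&dev, -1);           // opens /dev/vhost-net
 *     dev.acked_features = features;      // negotiated with the guest
 *     vhost_dev_start(&dev, vdev);
 *     ...
 *     vhost_dev_stop(&dev, vdev);
 *     vhost_dev_cleanup(&dev);
 */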
int vhost_dev_init(struct vhost_dev *hdev, int devfd)
582
{
583
    uint64_t features;
584
    int r;
585
    if (devfd >= 0) {
586
        hdev->control = devfd;
587
    } else {
588
        hdev->control = open("/dev/vhost-net", O_RDWR);
589
        if (hdev->control < 0) {
590
            return -errno;
591
        }
592
    }
593
    r = ioctl(hdev->control, VHOST_SET_OWNER, NULL);
594
    if (r < 0) {
595
        goto fail;
596
    }
597

    
598
    r = ioctl(hdev->control, VHOST_GET_FEATURES, &features);
599
    if (r < 0) {
600
        goto fail;
601
    }
602
    hdev->features = features;
603

    
604
    hdev->client.set_memory = vhost_client_set_memory;
605
    hdev->client.sync_dirty_bitmap = vhost_client_sync_dirty_bitmap;
606
    hdev->client.migration_log = vhost_client_migration_log;
607
    hdev->mem = qemu_mallocz(offsetof(struct vhost_memory, regions));
608
    hdev->log = NULL;
609
    hdev->log_size = 0;
610
    hdev->log_enabled = false;
611
    hdev->started = false;
612
    cpu_register_phys_memory_client(&hdev->client);
613
    return 0;
614
fail:
615
    r = -errno;
616
    close(hdev->control);
617
    return r;
618
}
619

    
620
void vhost_dev_cleanup(struct vhost_dev *hdev)
621
{
622
    cpu_unregister_phys_memory_client(&hdev->client);
623
    qemu_free(hdev->mem);
624
    close(hdev->control);
625
}
626

    
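/* Start the device: negotiate features, program the memory table, set up
 * every virtqueue, and, if dirty logging is already enabled (e.g. during
 * migration), allocate the log and register it with the kernel. Guest
 * notifiers are bound first so the kernel can signal interrupts. */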
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;
    if (!vdev->binding->set_guest_notifiers) {
        fprintf(stderr, "binding does not support guest notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, true);
    if (r < 0) {
        fprintf(stderr, "Error binding guest notifier: %d\n", -r);
        goto fail_notifiers;
    }

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev,
                                 vdev,
                                 hdev->vqs + i,
                                 i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = hdev->log_size ?
            qemu_mallocz(hdev->log_size * sizeof *hdev->log) : NULL;
        r = ioctl(hdev->control, VHOST_SET_LOG_BASE,
                  (uint64_t)(unsigned long)hdev->log);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    hdev->started = true;

    return 0;
fail_log:
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev,
                                vdev,
                                hdev->vqs + i,
                                i);
    }
fail_mem:
fail_features:
    vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
fail_notifiers:
fail:
    return r;
}

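/* Stop the device: tear down every virtqueue, do a final dirty bitmap
 * sync so migration sees all pages the kernel touched, and unbind the
 * guest notifiers. */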
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev,
                                vdev,
                                hdev->vqs + i,
                                i);
    }
    vhost_client_sync_dirty_bitmap(&hdev->client, 0,
                                   (target_phys_addr_t)~0x0ull);
    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
    if (r < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
        fflush(stderr);
    }
    assert(r >= 0);

    hdev->started = false;
    qemu_free(hdev->log);
    /* Avoid leaving a dangling pointer: the log may be reallocated later. */
    hdev->log = NULL;
    hdev->log_size = 0;
}