/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <sys/ioctl.h>
#include "vhost.h"
#include "hw/hw.h"
#include "range.h"
#include <linux/vhost.h>

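/* Sync the dirty-page log for the intersection of the memory range
 * [mfirst, mlast] with the log range [rfirst, rlast] into qemu's dirty
 * bitmap.  Each vhost_log_chunk_t is a word-wide bitmap written by the
 * kernel; bit N set means page N of that chunk was touched. */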
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    /* Check for the empty intersection first: when the ranges are
     * disjoint, end precedes start and the asserts below would be
     * evaluated on meaningless chunk indexes. */
    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);
    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        int bit;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really
         * need the barrier semantics of __sync
         * builtins, but it's easier to use them than
         * to roll our own. */
        log = __sync_fetch_and_and(from, 0);
        while ((bit = sizeof(log) > sizeof(int) ?
                ffsll(log) : ffs(log))) {
            ram_addr_t ram_addr;
            bit -= 1;
            ram_addr = cpu_get_physical_page_desc(addr + bit * VHOST_LOG_PAGE);
            cpu_physical_memory_set_dirty(ram_addr);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

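/* Memory client callback: flush vhost's dirty log for the given range
 * into qemu's dirty bitmap.  Guest memory regions and the used rings
 * are the only areas the kernel logs, so scan both. */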
static int vhost_client_sync_dirty_bitmap(CPUPhysMemoryClient *client,
                                          target_phys_addr_t start_addr,
                                          target_phys_addr_t end_addr)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    int i;
    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
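/* Unassign distinguishes four ways the removed range R can overlap an
 * existing region E:
 *   R covers E entirely        -> drop E
 *   R overlaps the end of E    -> shrink E, keeping its head
 *   R overlaps the start of E  -> shift E, keeping its tail
 *   R lies strictly inside E   -> split E into a head and a tail
 * Only one split can occur per call, since regions never overlap. */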
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it cannot
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it cannot
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
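/* If the new range is adjacent to an existing region in both
 * guest-physical and userspace addresses, grow that region rather than
 * adding a new one; the loop can merge neighbours on both sides of the
 * range into a single region. */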
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

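/* Log size, in chunks, needed to cover everything the kernel may log:
 * the end of each memory region and the end of each used ring. */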
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

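/* Switch to a log buffer of the given size (in chunks).  The kernel is
 * pointed at the new buffer before the old one is synced and freed, so
 * no dirty bits can be lost in the window: dev->log still refers to
 * the old buffer when the sync below runs. */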
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    vhost_log_chunk_t *log;
    uint64_t log_base;
    int r;
    if (size) {
        log = g_malloc0(size * sizeof *log);
    } else {
        log = NULL;
    }
    log_base = (uint64_t)(unsigned long)log;
    r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    vhost_client_sync_dirty_bitmap(&dev->client, 0,
                                   (target_phys_addr_t)~0x0ull);
    if (dev->log) {
        g_free(dev->log);
    }
    dev->log = log;
    dev->log_size = size;
}

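/* Verify that a memory layout change did not move or truncate any ring
 * that overlaps it; a running device cannot tolerate its rings being
 * remapped. */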
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        target_phys_addr_t l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            return -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            return -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return 0;
}

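/* Return the first region overlapping the given range, or NULL. */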
static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

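/* Return true if the memory table must be updated to describe the
 * mapping of [start_addr, start_addr + size - 1] to userspace address
 * uaddr, i.e. if no existing region already covers the range with the
 * same guest-to-userspace offset. */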
static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

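/* Memory client callback: qemu's physical memory layout changed in
 * [start_addr, start_addr + size - 1].  Rebuild the affected regions
 * and, if the device is running, push the new table to the kernel,
 * resizing the dirty log around the update as the comments below
 * describe. */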
static void vhost_client_set_memory(CPUPhysMemoryClient *client,
                                    target_phys_addr_t start_addr,
                                    ram_addr_t size,
                                    ram_addr_t phys_offset,
                                    bool log_dirty)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    uint64_t log_size;
    int r;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        flags = IO_MEM_UNASSIGNED;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    if (flags == IO_MEM_RAM) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size,
                                  (uintptr_t)qemu_get_ram_ptr(phys_offset))) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (flags == IO_MEM_RAM) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size,
                                (uintptr_t)qemu_get_ram_ptr(phys_offset));
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }

    if (!dev->started) {
        return;
    }

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
}

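/* Tell the kernel where this virtqueue's descriptor, avail and used
 * rings live in qemu's address space, and whether used-ring writes
 * must be logged; logging is keyed to the ring's guest-physical
 * address (log_guest_addr). */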
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

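/* Push the acked feature bits to the kernel, adding VHOST_F_LOG_ALL
 * on top when dirty logging is wanted. */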
static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}

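/* Flip dirty logging on or off for a running device: features first,
 * then each vring's LOG flag.  On failure, roll everything back to the
 * current dev->log_enabled state. */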
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

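/* Memory client callback: migration starts or stops dirty logging.
 * When the device is not running, only record the request; it takes
 * effect in vhost_dev_start(). */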
static int vhost_client_migration_log(CPUPhysMemoryClient *client,
                                      int enable)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        if (dev->log) {
            g_free(dev->log);
        }
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

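/* Hand one virtqueue to the kernel: program its size and base index,
 * map the rings into qemu's address space and register them, then wire
 * up the kick (host notifier) and call (guest notifier) eventfds. */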
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct VirtIODevice *vdev,
                                struct vhost_virtqueue *vq,
                                unsigned idx)
{
    target_phys_addr_t s, l, a;
    int r;
    struct vhost_vring_file file = {
        .index = idx,
    };
    struct vhost_vring_state state = {
        .index = idx,
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }
    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }

    return 0;

fail_call:
fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

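/* Take one virtqueue back from the kernel: fetch the next avail index
 * so virtio in qemu resumes where vhost stopped, then unmap the rings,
 * marking the used ring dirty since the kernel wrote to it. */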
static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
                                    struct VirtIODevice *vdev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx,
    };
    int r;
    r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    assert(r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

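/* Open and set up a vhost device.  The exported functions below are
 * expected to be driven by a client such as vhost-net roughly as
 * follows (a sketch of the intended order, not a verbatim copy of any
 * caller):
 *
 *     struct vhost_dev dev;
 *
 *     vhost_dev_init(&dev, -1, force);     // opens /dev/vhost-net
 *     ... negotiate features, fill in dev.nvqs and dev.vqs ...
 *     if (vhost_dev_query(&dev, vdev)) {
 *         vhost_dev_enable_notifiers(&dev, vdev);
 *         vhost_dev_start(&dev, vdev);
 *     }
 *     ...
 *     vhost_dev_stop(&dev, vdev);
 *     vhost_dev_disable_notifiers(&dev, vdev);
 *     vhost_dev_cleanup(&dev);
 */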
int vhost_dev_init(struct vhost_dev *hdev, int devfd, bool force)
{
    uint64_t features;
    int r;
    if (devfd >= 0) {
        hdev->control = devfd;
    } else {
        hdev->control = open("/dev/vhost-net", O_RDWR);
        if (hdev->control < 0) {
            return -errno;
        }
    }
    r = ioctl(hdev->control, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = ioctl(hdev->control, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }
    hdev->features = features;

    hdev->client.set_memory = vhost_client_set_memory;
    hdev->client.sync_dirty_bitmap = vhost_client_sync_dirty_bitmap;
    hdev->client.migration_log = vhost_client_migration_log;
    hdev->client.log_start = NULL;
    hdev->client.log_stop = NULL;
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    cpu_register_phys_memory_client(&hdev->client);
    hdev->force = force;
    return 0;
fail:
    r = -errno;
    close(hdev->control);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    cpu_unregister_phys_memory_client(&hdev->client);
    g_free(hdev->mem);
    close(hdev->control);
}

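/* Report whether vhost will be used for this device: yes when the
 * binding cannot report guest notifier state, when guest notifiers are
 * in use, or when vhost was explicitly forced. */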
bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    return !vdev->binding->query_guest_notifiers ||
        vdev->binding->query_guest_notifiers(vdev->binding_opaque) ||
        hdev->force;
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;
    if (!vdev->binding->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        /* Use a separate variable so the error being returned in r is
         * not clobbered by a successful cleanup. */
        int e = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
        if (e < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -e);
            fflush(stderr);
        }
        assert(e >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely set up when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
}

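/* Start the device: bind guest notifiers, push features and the memory
 * table to the kernel, hand over every virtqueue and, when logging is
 * already enabled (migration in progress), register a dirty log. */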
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;
    if (!vdev->binding->set_guest_notifiers) {
        fprintf(stderr, "binding does not support guest notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, true);
    if (r < 0) {
        fprintf(stderr, "Error binding guest notifier: %d\n", -r);
        goto fail_notifiers;
    }

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev,
                                 vdev,
                                 hdev->vqs + i,
                                 i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;
        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = hdev->log_size ?
            g_malloc0(hdev->log_size * sizeof *hdev->log) : NULL;
        /* The ioctl takes a pointer to the 64 bit base, as in
         * vhost_dev_log_resize() above. */
        log_base = (uint64_t)(unsigned long)hdev->log;
        r = ioctl(hdev->control, VHOST_SET_LOG_BASE, &log_base);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    hdev->started = true;

    return 0;
fail_log:
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev,
                                vdev,
                                hdev->vqs + i,
                                i);
    }
fail_mem:
fail_features:
    vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
fail_notifiers:
fail:
    return r;
}

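/* Stop the device and return state to qemu: tear down every
 * virtqueue, flush the remaining dirty log bits, release the guest
 * notifiers and free the log. */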
/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev,
                                vdev,
                                hdev->vqs + i,
                                i);
    }
    vhost_client_sync_dirty_bitmap(&hdev->client, 0,
                                   (target_phys_addr_t)~0x0ull);
    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
    if (r < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
        fflush(stderr);
    }
    assert(r >= 0);

    hdev->started = false;
    g_free(hdev->log);
    hdev->log = NULL;
    hdev->log_size = 0;
}