hw/vhost.c @ be62a2eb

/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <sys/ioctl.h>
#include "vhost.h"
#include "hw/hw.h"
#include "range.h"
#include <linux/vhost.h>
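
/* Dirty page logging.
 *
 * With logging enabled, the vhost kernel backend records guest writes in
 * a log buffer it shares with qemu: an array of vhost_log_chunk_t, one
 * bit per VHOST_LOG_PAGE page of guest physical memory.  (The constants
 * come from vhost.h; assuming 4 KiB pages and 64-bit chunks, one chunk
 * covers 64 * 4 KiB = 256 KiB, and bit i of chunk c marks page
 * c * 64 + i as dirty.)
 *
 * vhost_dev_sync_region() walks the chunks covering the intersection of
 * a listener section [mfirst, mlast] and a vhost region [rfirst, rlast],
 * atomically fetches and clears each non-zero chunk (the kernel may
 * still be setting bits concurrently) and forwards every set bit to
 * qemu's own dirty bitmap via memory_region_set_dirty().
 */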
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);
    if (end < start) {
        return;
    }
    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        int bit;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really
         * need the barrier semantics of __sync
         * builtins, but it's easier to use them than
         * roll our own. */
        log = __sync_fetch_and_and(from, 0);
        while ((bit = sizeof(log) > sizeof(int) ?
                ffsll(log) : ffs(log))) {
            ram_addr_t ram_addr;
            bit -= 1;
            ram_addr = section->offset_within_region + bit * VHOST_LOG_PAGE;
            memory_region_set_dirty(section->mr, ram_addr);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}
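
/* Sync dirty pages from the kernel log into qemu's bitmap for every
 * memory region and used ring that overlaps [start_addr, end_addr].
 * No-op unless the device is started with logging enabled. */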
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int i;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}
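
/* MemoryListener log_sync callback: flush the kernel's dirty log for the
 * address range covered by this section. */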
static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    target_phys_addr_t start_addr = section->offset_within_address_space;
    target_phys_addr_t end_addr = start_addr + section->size;

    vhost_sync_dirty_bitmap(dev, section, start_addr, end_addr);
}
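
/* The vhost kernel backend keeps its own table of guest memory regions
 * (struct vhost_memory).  The helpers below edit qemu's shadow copy of
 * that table in place; it is pushed to the kernel with
 * VHOST_SET_MEM_TABLE in vhost_set_memory(). */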
/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}
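
/* Two mappings merge only when they are adjacent (or overlap) in both
 * guest-physical and userspace address space, so the merged region is
 * still one contiguous mapping.  For example (addresses illustrative
 * only), guest range [0x1000, 0x2000) at userspace 0xa0001000 merges
 * with [0x2000, 0x3000) at 0xa0002000, but not with the same guest
 * range mapped at an unrelated userspace address. */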
/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}
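
/* Log size, in vhost_log_chunk_t units: enough chunks to cover the
 * highest guest-physical address that vhost can dirty, i.e. the end of
 * the last memory region or used ring, whichever is higher. */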
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}
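
/* Resize the dirty log.  The kernel is pointed at the new buffer with
 * VHOST_SET_LOG_BASE before the old one is synced and freed, so bits
 * logged while the buffers are switched are not lost. */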
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    vhost_log_chunk_t *log;
    uint64_t log_base;
    int r, i;
    if (size) {
        log = g_malloc0(size * sizeof *log);
    } else {
        log = NULL;
    }
    log_base = (uint64_t)(unsigned long)log;
    r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        vhost_sync_dirty_bitmap(dev, &dev->mem_sections[i],
                                0, (target_phys_addr_t)~0x0ull);
    }
    if (dev->log) {
        g_free(dev->log);
    }
    dev->log = log;
    dev->log_size = size;
}
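
/* After a memory change, verify that every ring overlapping the changed
 * range still maps to the same host address.  The kernel side keeps the
 * old translation, so a ring that moved in qemu's address space cannot
 * be handled and is a fatal error for the caller. */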
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        target_phys_addr_t l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            return -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            return -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return 0;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}
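
/* Return true if the table entry covering this range (if any) needs an
 * update: the range is missing, extends past the entry, or is mapped at
 * a different userspace address. */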
static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}
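
/* Common add/remove path for a memory section.  Non-RAM sections are
 * ignored, and a section that qemu itself is write-logging (log_dirty,
 * e.g. VGA memory) is treated as a removal so that vhost never writes
 * to it behind qemu's back.  On a running device the kernel table is
 * updated with VHOST_SET_MEM_TABLE; the dirty log is grown before the
 * update and shrunk only after it, so the log always covers the table
 * the kernel is using. */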
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    bool log_dirty = memory_region_is_logging(section->mr);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    uint64_t log_size;
    int r;
    void *ram;

    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr);
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    /* Remove old mapping for this memory, if any. */
    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    }

    if (!dev->started) {
        return;
    }

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes of log
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
}
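
/* MemoryListener region_add/region_del callbacks.  Besides updating the
 * vhost memory table, they maintain dev->mem_sections, the list of
 * sections walked when a full dirty-log sync is needed. */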
static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    vhost_set_memory(listener, section, false);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            /* memmove() takes a byte count, not an element count. */
            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}
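
/* Program one vring's descriptor, avail and used addresses into the
 * kernel.  These are host virtual addresses (the rings are already
 * mapped into qemu); log_guest_addr is the guest-physical address of
 * the used ring, which the kernel needs for dirty logging. */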
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}
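
/* Toggle dirty logging on a running device: renegotiate the feature
 * bit, then reprogram every vring.  On failure, vrings that were
 * already switched are rolled back to the previous logging state. */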
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}
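
/* Called when qemu globally starts or stops dirty logging (migration).
 * The log buffer is allocated before logging is enabled and freed only
 * after it is disabled, so the kernel never holds a stale log base. */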
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        if (dev->log) {
            g_free(dev->log);
        }
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    /* FIXME: implement */
}
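
/* Start one virtqueue in the kernel: program its size and base index,
 * map the descriptor, avail, used and ring areas out of guest memory,
 * hand those addresses to vhost, and wire up the kick (host notifier)
 * and call (guest notifier) eventfds. */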
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct VirtIODevice *vdev,
                                struct vhost_virtqueue *vq,
                                unsigned idx)
{
    target_phys_addr_t s, l, a;
    int r;
    struct vhost_vring_file file = {
        .index = idx,
    };
    struct vhost_vring_state state = {
        .index = idx,
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }
    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }

    return 0;

fail_call:
fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}
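
/* Stop one virtqueue: read the last available index back from the
 * kernel so virtio in qemu can resume where vhost left off, then unmap
 * the rings, marking the used ring dirty since the kernel wrote to it. */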
static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
                                    struct VirtIODevice *vdev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx,
    };
    int r;
    r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    assert(r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}
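
/* Open and initialize a vhost device.  If devfd is negative, open
 * /dev/vhost-net instead; take ownership with VHOST_SET_OWNER, query
 * the backend's feature bits, and register the memory listener so the
 * region table starts tracking guest memory right away. */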
int vhost_dev_init(struct vhost_dev *hdev, int devfd, bool force)
{
    uint64_t features;
    int r;
    if (devfd >= 0) {
        hdev->control = devfd;
    } else {
        hdev->control = open("/dev/vhost-net", O_RDWR);
        if (hdev->control < 0) {
            return -errno;
        }
    }
    r = ioctl(hdev->control, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = ioctl(hdev->control, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
    };
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    memory_listener_register(&hdev->memory_listener);
    hdev->force = force;
    return 0;
fail:
    r = -errno;
    close(hdev->control);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    memory_listener_unregister(&hdev->memory_listener);
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    close(hdev->control);
}
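
/* Decide whether vhost should be used for this device: yes if the
 * binding cannot report guest notifier support (assume it is there), if
 * it reports support, or if the user forced vhost on. */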
bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    return !vdev->binding->query_guest_notifiers ||
        vdev->binding->query_guest_notifiers(vdev->binding_opaque) ||
        hdev->force;
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in the vhost kernel backend.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;
    if (!vdev->binding->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely set up when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
}
    
825
/* Host notifiers must be enabled at this point. */
826
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
827
{
828
    int i, r;
829
    if (!vdev->binding->set_guest_notifiers) {
830
        fprintf(stderr, "binding does not support guest notifiers\n");
831
        r = -ENOSYS;
832
        goto fail;
833
    }
834

    
835
    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, true);
836
    if (r < 0) {
837
        fprintf(stderr, "Error binding guest notifier: %d\n", -r);
838
        goto fail_notifiers;
839
    }
840

    
841
    r = vhost_dev_set_features(hdev, hdev->log_enabled);
842
    if (r < 0) {
843
        goto fail_features;
844
    }
845
    r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
846
    if (r < 0) {
847
        r = -errno;
848
        goto fail_mem;
849
    }
850
    for (i = 0; i < hdev->nvqs; ++i) {
851
        r = vhost_virtqueue_init(hdev,
852
                                 vdev,
853
                                 hdev->vqs + i,
854
                                 i);
855
        if (r < 0) {
856
            goto fail_vq;
857
        }
858
    }
859

    
860
    if (hdev->log_enabled) {
861
        hdev->log_size = vhost_get_log_size(hdev);
862
        hdev->log = hdev->log_size ?
863
            g_malloc0(hdev->log_size * sizeof *hdev->log) : NULL;
864
        r = ioctl(hdev->control, VHOST_SET_LOG_BASE,
865
                  (uint64_t)(unsigned long)hdev->log);
866
        if (r < 0) {
867
            r = -errno;
868
            goto fail_log;
869
        }
870
    }
871

    
872
    hdev->started = true;
873

    
874
    return 0;
875
fail_log:
876
fail_vq:
877
    while (--i >= 0) {
878
        vhost_virtqueue_cleanup(hdev,
879
                                vdev,
880
                                hdev->vqs + i,
881
                                i);
882
    }
883
fail_mem:
884
fail_features:
885
    vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
886
fail_notifiers:
887
fail:
888
    return r;
889
}
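
/* Stop order: each vring is read back and torn down first, then a final
 * dirty-log sync runs over every section so nothing the kernel dirtied
 * is missed; only then are the guest notifiers and the log dropped. */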
/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev,
                                vdev,
                                hdev->vqs + i,
                                i);
    }
    for (i = 0; i < hdev->n_mem_sections; ++i) {
        vhost_sync_dirty_bitmap(hdev, &hdev->mem_sections[i],
                                0, (target_phys_addr_t)~0x0ull);
    }
    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
    if (r < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
        fflush(stderr);
    }
    assert(r >= 0);

    hdev->started = false;
    g_free(hdev->log);
    hdev->log = NULL;
    hdev->log_size = 0;
}