/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <sys/ioctl.h>
#include "vhost.h"
#include "hw/hw.h"
#include "range.h"
#include <linux/vhost.h>

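/* Sync the kernel's dirty log into QEMU's dirty bitmap for the
 * intersection of the memory range [mfirst, mlast] and the logged
 * range [rfirst, rlast]: fetch-and-clear each log chunk and mark
 * every page it reports as dirty. */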
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);
    if (end < start) {
        return;
    }
    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        int bit;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really
         * need the barrier semantics of __sync builtins,
         * but it's easier to use them than roll our own. */
        log = __sync_fetch_and_and(from, 0);
        while ((bit = sizeof(log) > sizeof(int) ?
                ffsll(log) : ffs(log))) {
            ram_addr_t ram_addr;
            bit -= 1;
            ram_addr = cpu_get_physical_page_desc(addr + bit * VHOST_LOG_PAGE);
            cpu_physical_memory_set_dirty(ram_addr);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

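/* CPUPhysMemoryClient callback: pull dirty pages logged by the kernel
 * into QEMU's bitmap, for every memory region and for each virtqueue's
 * used ring. */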
static int vhost_client_sync_dirty_bitmap(CPUPhysMemoryClient *client,
                                          target_phys_addr_t start_addr,
                                          target_phys_addr_t end_addr)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    int i;
    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
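/* Remove [start_addr, start_addr + size) from the region array. The
 * range may delete a region outright, truncate its tail (shrink),
 * truncate its head (shift), or split it in two. */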
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            assert(to >= 0);
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
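/* Add [start_addr, start_addr + size) mapped at uaddr, merging it with
 * any region that is adjacent in both guest-physical and userspace
 * address space; otherwise append it as a new region. */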
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

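/* Return the dirty log size, in chunks, needed to cover every memory
 * region and every virtqueue's used ring. */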
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

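/* Switch to a new dirty log of the given size: point the kernel at the
 * new log first, then flush anything recorded in the old one before
 * freeing it, so no logged writes are lost. */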
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    vhost_log_chunk_t *log;
    uint64_t log_base;
    int r;
    if (size) {
        log = qemu_mallocz(size * sizeof *log);
    } else {
        log = NULL;
    }
    log_base = (uint64_t)(unsigned long)log;
    r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    vhost_client_sync_dirty_bitmap(&dev->client, 0,
                                   (target_phys_addr_t)~0x0ull);
    if (dev->log) {
        qemu_free(dev->log);
    }
    dev->log = log;
    dev->log_size = size;
}

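/* Verify that no virtqueue ring intersecting the changed range has
 * moved in QEMU's address space: vhost caches the ring mappings, so a
 * relocation cannot be handled while the device is running. */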
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        target_phys_addr_t l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            return -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            return -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return 0;
}

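/* Find a region overlapping the given guest-physical range, if any. */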
static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

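/* Return true if the memory table must be updated for this mapping:
 * either no single region covers the whole range, or the range's
 * userspace address has changed. */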
static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

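/* CPUPhysMemoryClient callback: keep the vhost memory table in sync as
 * guest physical memory is mapped and unmapped, resizing the dirty log
 * around the table update when logging is enabled. */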
static void vhost_client_set_memory(CPUPhysMemoryClient *client,
                                    target_phys_addr_t start_addr,
                                    ram_addr_t size,
                                    ram_addr_t phys_offset,
                                    bool log_dirty)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    uint64_t log_size;
    int r;

    dev->mem = qemu_realloc(dev->mem, s);

    if (log_dirty) {
        flags = IO_MEM_UNASSIGNED;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    if (flags == IO_MEM_RAM) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size,
                                  (uintptr_t)qemu_get_ram_ptr(phys_offset))) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    /* Remove old mapping for this memory, if any. */
    vhost_dev_unassign_memory(dev, start_addr, size);
    if (flags == IO_MEM_RAM) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size,
                                (uintptr_t)qemu_get_ram_ptr(phys_offset));
    }

    if (!dev->started) {
        return;
    }

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
}

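/* Program a virtqueue's descriptor, avail and used ring addresses (as
 * seen in our address space) into the kernel, optionally asking it to
 * log writes to this ring. */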
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

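/* Push the acked feature bits to the kernel, adding VHOST_F_LOG_ALL
 * when dirty logging is requested. */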
static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}

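/* Enable or disable dirty logging on the device and all its
 * virtqueues, rolling back to the previous state on failure. */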
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

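/* CPUPhysMemoryClient callback: start or stop dirty logging (used
 * around migration), allocating or freeing the log as needed. */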
static int vhost_client_migration_log(CPUPhysMemoryClient *client,
                                      int enable)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        if (dev->log) {
            qemu_free(dev->log);
        }
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

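/* Bring one virtqueue under kernel control: program its size and base
 * index, map the descriptor, avail, used and ring areas, and wire up
 * the kick (host-to-kernel) and call (kernel-to-guest) eventfds. */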
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct VirtIODevice *vdev,
                                struct vhost_virtqueue *vq,
                                unsigned idx)
{
    target_phys_addr_t s, l, a;
    int r;
    struct vhost_vring_file file = {
        .index = idx,
    };
    struct vhost_vring_state state = {
        .index = idx,
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    if (!vdev->binding->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        return -ENOSYS;
    }

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled);
    if (r < 0) {
        goto fail_alloc;
    }
    r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, true);
    if (r < 0) {
        fprintf(stderr, "Error binding host notifier: %d\n", -r);
        goto fail_host_notifier;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }

    return 0;

fail_call:
fail_kick:
    vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
fail_host_notifier:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

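/* Undo vhost_virtqueue_init: drop the host notifier, read back the
 * last avail index so the virtio core can resume where the kernel
 * stopped, and unmap the rings (marking the used ring dirty). */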
static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
                                    struct VirtIODevice *vdev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx,
    };
    int r;
    r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d host cleanup failed: %d\n", idx, r);
        fflush(stderr);
    }
    assert(r >= 0);
    r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    assert(r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

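/* One-time device setup: open (or adopt) the vhost control fd, take
 * ownership, query the feature set, and register as a physical memory
 * client so the memory table tracks guest mappings. */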
int vhost_dev_init(struct vhost_dev *hdev, int devfd, bool force)
{
    uint64_t features;
    int r;
    if (devfd >= 0) {
        hdev->control = devfd;
    } else {
        hdev->control = open("/dev/vhost-net", O_RDWR);
        if (hdev->control < 0) {
            return -errno;
        }
    }
    r = ioctl(hdev->control, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = ioctl(hdev->control, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }
    hdev->features = features;

    hdev->client.set_memory = vhost_client_set_memory;
    hdev->client.sync_dirty_bitmap = vhost_client_sync_dirty_bitmap;
    hdev->client.migration_log = vhost_client_migration_log;
    hdev->client.log_start = NULL;
    hdev->client.log_stop = NULL;
    hdev->mem = qemu_mallocz(offsetof(struct vhost_memory, regions));
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    cpu_register_phys_memory_client(&hdev->client);
    hdev->force = force;
    return 0;
fail:
    r = -errno;
    close(hdev->control);
    return r;
}

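/* Release everything vhost_dev_init acquired. */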
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    cpu_unregister_phys_memory_client(&hdev->client);
    qemu_free(hdev->mem);
    close(hdev->control);
}

bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev) |
695 |
{ |
696 |
return !vdev->binding->query_guest_notifiers ||
|
697 |
vdev->binding->query_guest_notifiers(vdev->binding_opaque) || |
698 |
hdev->force; |
699 |
} |
700 |
|
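/* Hand the device over to the kernel: bind guest notifiers, push
 * features and the memory table, initialize every virtqueue, and set
 * up the dirty log when logging is enabled. */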
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;
    if (!vdev->binding->set_guest_notifiers) {
        fprintf(stderr, "binding does not support guest notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, true);
    if (r < 0) {
        fprintf(stderr, "Error binding guest notifier: %d\n", -r);
        goto fail_notifiers;
    }

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev,
                                 vdev,
                                 hdev->vqs + i,
                                 i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = hdev->log_size ?
            qemu_mallocz(hdev->log_size * sizeof *hdev->log) : NULL;
        r = ioctl(hdev->control, VHOST_SET_LOG_BASE,
                  (uint64_t)(unsigned long)hdev->log);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    hdev->started = true;

    return 0;
fail_log:
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev,
                                vdev,
                                hdev->vqs + i,
                                i);
    }
fail_mem:
fail_features:
    vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
fail_notifiers:
fail:
    return r;
}

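/* Take the device back from the kernel: tear down the virtqueues,
 * flush the dirty log one last time, and unbind the guest notifiers. */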
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev,
                                vdev,
                                hdev->vqs + i,
                                i);
    }
    vhost_client_sync_dirty_bitmap(&hdev->client, 0,
                                   (target_phys_addr_t)~0x0ull);
    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
    if (r < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
        fflush(stderr);
    }
    assert(r >= 0);

    hdev->started = false;
    qemu_free(hdev->log);
    hdev->log = NULL;
    hdev->log_size = 0;
}