kvm-all.c @ 8a7c7393
/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-barrier.h"
#include "sysemu.h"
#include "hw/hw.h"
#include "gdbstub.h"
#include "kvm.h"
#include "bswap.h"
#include "memory.h"
#include "exec-memory.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct KVMSlot
{
    target_phys_addr_t start_addr;
    ram_addr_t memory_size;
    void *ram;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;

struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    bool coalesced_flush_in_progress;
    int broken_set_mem_region;
    int migration_log;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int pit_in_kernel;
    int pit_state2;
    int xsave, xcrs;
    int many_ioeventfds;
    int irqchip_inject_ioctl;
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *irq_routes;
    int nr_allocated_irq_routes;
    uint32_t *used_gsi_bitmap;
    unsigned int max_gsi;
#endif
};
KVMState *kvm_state;
bool kvm_kernel_irqchip;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_LAST_INFO
};

static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        if (s->slots[i].memory_size == 0) {
            return &s->slots[i];
        }
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         target_phys_addr_t start_addr,
                                         target_phys_addr_t end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            target_phys_addr_t start_addr,
                                            target_phys_addr_t end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       target_phys_addr_t *phys_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            return 1;
        }
    }

    return 0;
}

static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}
static void kvm_reset_vcpu(void *opaque)
{
    CPUState *env = opaque;

    kvm_arch_reset_vcpu(env);
}

int kvm_pit_in_kernel(void)
{
    return kvm_state->pit_in_kernel;
}

int kvm_init_vcpu(CPUState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;
    env->kvm_state = s;
    env->kvm_vcpu_dirty = 1;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)env->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(env);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
        kvm_arch_reset_vcpu(env);
    }
err:
    return ret;
}
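/* Illustration (not part of the original file): KVM_CHECK_EXTENSION on
 * KVM_CAP_COALESCED_MMIO returns the page offset of the coalesced-MMIO
 * ring inside the vcpu mmap area.  Assuming the typical return value of 1,
 * the ring sits on the page immediately after struct kvm_run:
 *
 *     ring = (void *)env->kvm_run + 1 * PAGE_SIZE;
 *
 * All vcpus of a VM share that one ring, hence the !s->coalesced_mmio_ring
 * guard so only the first initialized vcpu sets the pointer.
 */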
/*
 * dirty pages logging control
 */

static int kvm_mem_flags(KVMState *s, bool log_dirty)
{
    return log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
}

static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
{
    KVMState *s = kvm_state;
    int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
    int old_flags;

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty);
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }

    if (flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}

static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      ram_addr_t size, bool log_dirty)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (target_phys_addr_t)(phys_addr + size - 1));
        return -EINVAL;
    }
    return kvm_slot_dirty_pages_log_change(mem, log_dirty);
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   section->size, true);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   section->size, false);
    if (r < 0) {
        abort();
    }
}

static int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        mem = &s->slots[i];

        if (!mem->memory_size) {
            continue;
        }
        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}

/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                         unsigned long *bitmap)
{
    unsigned int i, j;
    unsigned long page_number, c;
    target_phys_addr_t addr, addr1;
    unsigned int len = ((section->size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) / HOST_LONG_BITS;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        if (bitmap[i] != 0) {
            c = leul_to_cpu(bitmap[i]);
            do {
                j = ffsl(c) - 1;
                c &= ~(1ul << j);
                page_number = i * HOST_LONG_BITS + j;
                addr1 = page_number * TARGET_PAGE_SIZE;
                addr = section->offset_within_region + addr1;
                memory_region_set_dirty(section->mr, addr, TARGET_PAGE_SIZE);
            } while (c != 0);
        }
    }
    return 0;
}
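/* Worked example (illustrative, assuming HOST_LONG_BITS == 64 and 4 KiB
 * target pages): if bit 5 of bitmap[2] is set, then
 *
 *     j = ffsl(c) - 1 = 5
 *     page_number = 2 * 64 + 5 = 133
 *     addr1 = 133 * 4096 = 0x85000
 *
 * so the page at offset 0x85000 into the section's region is marked dirty.
 * Scanning whole words and skipping zero ones is what makes this cheaper
 * than touching every page address when memory is mostly clean.
 */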
#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))

/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using
 * memory_region_set_dirty().  This means all bits are set
 * to dirty.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;
    target_phys_addr_t start_addr = section->offset_within_address_space;
    target_phys_addr_t end_addr = start_addr + section->size;

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        /* XXX bad kernel interface alert
         * For dirty bitmap, kernel allocates array of size aligned to
         * bits-per-long.  But for case when the kernel is 64bits and
         * the userspace is 32bits, userspace can't align to the same
         * bits-per-long, since sizeof(long) is different between kernel
         * and user space.  This way, userspace will provide buffer which
         * may be 4 bytes less than the kernel will use, resulting in
         * userspace memory corruption (which is not detectable by valgrind
         * too, in most cases).
         * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
         * a hope that sizeof(long) won't become >8 any time soon.
         */
        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
                     /*HOST_LONG_BITS*/ 64) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = g_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = g_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
        start_addr = mem->start_addr + mem->memory_size;
    }
    g_free(d.dirty_bitmap);

    return ret;
}
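/* Sizing example (illustrative): ALIGN(x, y) rounds x up to a multiple of
 * y, e.g. ALIGN(9, 8) == 16.  For a 1 GiB slot with 4 KiB target pages,
 * memory_size >> TARGET_PAGE_BITS = 262144 pages, and
 * ALIGN(262144, 64) / 8 = 32768 bytes of bitmap (one bit per page,
 * rounded up to whole 64-bit words to match the kernel's allocation).
 */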
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}

int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification.  This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD)
    int ioeventfds[7];
    int i, ret = 0;
    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, false);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}
static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}

static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
{
    KVMState *s = kvm_state;
    KVMSlot *mem, old;
    int err;
    MemoryRegion *mr = section->mr;
    bool log_dirty = memory_region_is_logging(mr);
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    void *ram = NULL;
    unsigned delta;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. */
    delta = TARGET_PAGE_ALIGN(size) - size;
    if (delta > size) {
        return;
    }
    start_addr += delta;
    size -= delta;
    size &= TARGET_PAGE_MASK;
    if (!size || (start_addr & ~TARGET_PAGE_MASK)) {
        return;
    }

    if (!memory_region_is_ram(mr)) {
        return;
    }

    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (add && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (ram - start_addr == mem->ram - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - update flags and done. */
            kvm_slot_dirty_pages_log_change(mem, log_dirty);
            return;
        }

        old = *mem;

        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_physical_sync_dirty_bitmap(section);
        }

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, even not by
         * unregistering the previous ones and then registering the larger
         * slot. We have to maintain the existing fragmentation. Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size && add) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            ram += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
#ifdef TARGET_PPC
                fprintf(stderr, "%s: This is probably because your kernel's " \
                                "PAGE_SIZE is too big. Please try to use 4k " \
                                "PAGE_SIZE!\n", __func__);
#endif
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->ram = old.ram + size_delta;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size) {
        return;
    }
    if (!add) {
        return;
    }
    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->ram = ram;
    mem->flags = kvm_mem_flags(s, log_dirty);

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}
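/* Worked example (illustrative): suppose an existing slot covers guest
 * physical [0x100000, 0x500000) and a section deletes [0x200000, 0x300000).
 * The loop unregisters the old slot, then registers a prefix slot
 * [0x100000, 0x200000) (memory_size = start_addr - old.start_addr) and a
 * suffix slot [0x300000, 0x500000) (ram advanced by size_delta = 0x200000),
 * leaving only the deleted range unmapped.
 */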
static void kvm_begin(MemoryListener *listener)
{
}

static void kvm_commit(MemoryListener *listener)
{
}

static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, true);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, false);
}

static void kvm_region_nop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_physical_sync_dirty_bitmap(section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_global_start(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(1);
    assert(r >= 0);
}

static void kvm_log_global_stop(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(0);
    assert(r >= 0);
}

static void kvm_mem_ioeventfd_add(MemoryRegionSection *section,
                                  bool match_data, uint64_t data, int fd)
{
    int r;

    assert(match_data && section->size == 4);

    r = kvm_set_ioeventfd_mmio_long(fd, section->offset_within_address_space,
                                    data, true);
    if (r < 0) {
        abort();
    }
}

static void kvm_mem_ioeventfd_del(MemoryRegionSection *section,
                                  bool match_data, uint64_t data, int fd)
{
    int r;

    r = kvm_set_ioeventfd_mmio_long(fd, section->offset_within_address_space,
                                    data, false);
    if (r < 0) {
        abort();
    }
}

static void kvm_io_ioeventfd_add(MemoryRegionSection *section,
                                 bool match_data, uint64_t data, int fd)
{
    int r;

    assert(match_data && section->size == 2);

    r = kvm_set_ioeventfd_pio_word(fd, section->offset_within_address_space,
                                   data, true);
    if (r < 0) {
        abort();
    }
}

static void kvm_io_ioeventfd_del(MemoryRegionSection *section,
                                 bool match_data, uint64_t data, int fd)
{
    int r;

    r = kvm_set_ioeventfd_pio_word(fd, section->offset_within_address_space,
                                   data, false);
    if (r < 0) {
        abort();
    }
}

static void kvm_eventfd_add(MemoryListener *listener,
                            MemoryRegionSection *section,
                            bool match_data, uint64_t data, int fd)
{
    if (section->address_space == get_system_memory()) {
        kvm_mem_ioeventfd_add(section, match_data, data, fd);
    } else {
        kvm_io_ioeventfd_add(section, match_data, data, fd);
    }
}

static void kvm_eventfd_del(MemoryListener *listener,
                            MemoryRegionSection *section,
                            bool match_data, uint64_t data, int fd)
{
    if (section->address_space == get_system_memory()) {
        kvm_mem_ioeventfd_del(section, match_data, data, fd);
    } else {
        kvm_io_ioeventfd_del(section, match_data, data, fd);
    }
}

static MemoryListener kvm_memory_listener = {
    .begin = kvm_begin,
    .commit = kvm_commit,
    .region_add = kvm_region_add,
    .region_del = kvm_region_del,
    .region_nop = kvm_region_nop,
    .log_start = kvm_log_start,
    .log_stop = kvm_log_stop,
    .log_sync = kvm_log_sync,
    .log_global_start = kvm_log_global_start,
    .log_global_stop = kvm_log_global_stop,
    .eventfd_add = kvm_eventfd_add,
    .eventfd_del = kvm_eventfd_del,
    .priority = 10,
};
static void kvm_handle_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;

    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
    }
}

int kvm_irqchip_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_irqchip_in_kernel());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irqchip_inject_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irqchip_line");
        abort();
    }

    return (s->irqchip_inject_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}

#ifdef KVM_CAP_IRQ_ROUTING
static void set_gsi(KVMState *s, unsigned int gsi)
{
    assert(gsi < s->max_gsi);

    s->used_gsi_bitmap[gsi / 32] |= 1U << (gsi % 32);
}
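/* Bit-indexing example (illustrative): gsi 37 lands in word 37 / 32 = 1,
 * bit 37 % 32 = 5, so set_gsi() ORs used_gsi_bitmap[1] with 1U << 5 (0x20).
 */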
static void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING);
    if (gsi_count > 0) {
        unsigned int gsi_bits, i;

        /* Round up so we can search ints using ffs */
        gsi_bits = ALIGN(gsi_count, 32);
        s->used_gsi_bitmap = g_malloc0(gsi_bits / 8);
        s->max_gsi = gsi_bits;

        /* Mark any over-allocated bits as already in use */
        for (i = gsi_count; i < gsi_bits; i++) {
            set_gsi(s, i);
        }
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    kvm_arch_init_irq_routing(s);
}
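/* Rounding example (illustrative): with gsi_count = 24, ALIGN(24, 32) = 32,
 * so one 32-bit word (4 bytes) is allocated and bits 24..31 are pre-marked
 * as used; an ffs()-based scan for a free GSI can then never hand out a
 * number the kernel does not actually support.
 */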
static void kvm_add_routing_entry(KVMState *s,
                                  struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];
    memset(new, 0, sizeof(*new));
    new->gsi = entry->gsi;
    new->type = entry->type;
    new->flags = entry->flags;
    new->u = entry->u;

    set_gsi(s, entry->gsi);
}
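/* Growth note (illustrative): the table doubles on demand starting at 64
 * entries, and the allocation is sized as the struct kvm_irq_routing header
 * plus n trailing entries, matching the flexible-array layout the
 * KVM_SET_GSI_ROUTING ioctl expects.
 */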
void kvm_irqchip_add_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e;

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}

int kvm_irqchip_commit_routes(KVMState *s)
{
    s->irq_routes->flags = 0;
    return kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
}

#else /* !KVM_CAP_IRQ_ROUTING */

static void kvm_init_irq_routing(KVMState *s)
{
}
#endif /* !KVM_CAP_IRQ_ROUTING */

static int kvm_irqchip_create(KVMState *s)
{
    QemuOptsList *list = qemu_find_opts("machine");
    int ret;

    if (QTAILQ_EMPTY(&list->head) ||
        !qemu_opt_get_bool(QTAILQ_FIRST(&list->head),
                           "kernel_irqchip", false) ||
        !kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
    if (ret < 0) {
        fprintf(stderr, "Create kernel irqchip failed\n");
        return ret;
    }

    s->irqchip_inject_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
    }
    kvm_kernel_irqchip = true;

    kvm_init_irq_routing(s);

    return 0;
}
int kvm_init(void)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int i;

    s = g_malloc0(sizeof(KVMState));

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        s->slots[i].slot = i;
    }
    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0) {
#ifdef TARGET_S390X
        fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
                        "your host kernel command line\n");
#endif
        ret = s->vmfd;
        goto err;
    }

    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);

    s->broken_set_mem_region = 1;
    ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);

#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

#ifdef KVM_CAP_XSAVE
    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

#ifdef KVM_CAP_XCRS
    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

#ifdef KVM_CAP_PIT_STATE2
    s->pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
#endif

    ret = kvm_arch_init(s);
    if (ret < 0) {
        goto err;
    }

    ret = kvm_irqchip_create(s);
    if (ret < 0) {
        goto err;
    }

    kvm_state = s;
    memory_listener_register(&kvm_memory_listener, NULL);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    cpu_interrupt_handler = kvm_handle_interrupt;

    return 0;

err:
    if (s) {
        if (s->vmfd >= 0) {
            close(s->vmfd);
        }
        if (s->fd != -1) {
            close(s->fd);
        }
    }
    g_free(s);

    return ret;
}
static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
                          uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }
}
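/* Illustration: for a single OUTB, count == 1 and size == 1.  For x86
 * string I/O such as "rep outsw", the kernel batches the repetition, so a
 * single KVM_EXIT_IO can arrive with size == 2 and count == n; the data
 * buffer then holds n back-to-back 16-bit values inside the kvm_run mmap
 * area (at run->io.data_offset), which is why ptr advances by size each
 * iteration.
 */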
static int kvm_handle_internal_error(CPUState *env, struct kvm_run *run)
{
    fprintf(stderr, "KVM internal error.");
    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int i;

        fprintf(stderr, " Suberror: %d\n", run->internal.suberror);
        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        }
    } else {
        fprintf(stderr, "\n");
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(env)) {
            cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}

void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}
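/* Drain sketch (illustrative): the kernel appends guest writes to
 * registered coalesced zones at ring->last; userspace consumes from
 * ring->first, wrapping modulo KVM_COALESCED_MMIO_MAX.  The smp_wmb()
 * ensures each entry has been replayed into guest memory before the slot
 * is handed back to the kernel by advancing ring->first.  The
 * coalesced_flush_in_progress flag guards against recursion, since
 * replaying an MMIO write can itself trigger another flush.
 */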
static void do_kvm_cpu_synchronize_state(void *_env)
{
    CPUState *env = _env;

    if (!env->kvm_vcpu_dirty) {
        kvm_arch_get_registers(env);
        env->kvm_vcpu_dirty = 1;
    }
}

void kvm_cpu_synchronize_state(CPUState *env)
{
    if (!env->kvm_vcpu_dirty) {
        run_on_cpu(env, do_kvm_cpu_synchronize_state, env);
    }
}

void kvm_cpu_synchronize_post_reset(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_RESET_STATE);
    env->kvm_vcpu_dirty = 0;
}

void kvm_cpu_synchronize_post_init(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_FULL_STATE);
    env->kvm_vcpu_dirty = 0;
}

int kvm_cpu_exec(CPUState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(env)) {
        env->exit_request = 0;
        return EXCP_HLT;
    }

    do {
        if (env->kvm_vcpu_dirty) {
            kvm_arch_put_registers(env, KVM_PUT_RUNTIME_STATE);
            env->kvm_vcpu_dirty = 0;
        }

        kvm_arch_pre_run(env, run);
        if (env->exit_request) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            qemu_cpu_kick_self();
        }
        qemu_mutex_unlock_iothread();

        run_ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);

        qemu_mutex_lock_iothread();
        kvm_arch_post_run(env, run);

        kvm_flush_coalesced_mmio_buffer();

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                ret = EXCP_INTERRUPT;
                break;
            }
            fprintf(stderr, "error: kvm run failed %s\n",
                    strerror(-run_ret));
            abort();
        }

        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            kvm_handle_io(run->io.port,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(env, run);
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret == 0);

    if (ret < 0) {
        cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    env->exit_request = 0;
    return ret;
}
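/* Return-value convention (summary of the loop above): ret == 0 keeps the
 * vcpu looping in KVM_RUN, a positive EXCP_* value (EXCP_HLT,
 * EXCP_INTERRUPT) bounces control back to the generic cpu-exec loop, and a
 * negative value dumps CPU state and stops the VM in
 * RUN_STATE_INTERNAL_ERROR.
 */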
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUState *env, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(env->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_has_sync_mmu(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_has_xsave(void)
{
    return kvm_state->xsave;
}

int kvm_has_xcrs(void)
{
    return kvm_state->xcrs;
}

int kvm_has_pit_state2(void)
{
    return kvm_state->pit_state2;
}

int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}

int kvm_has_gsi_routing(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return false;
#endif
}

int kvm_allows_irq0_override(void)
{
    return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
}

void kvm_setup_guest_memory(void *start, size_t size)
{
    if (!kvm_has_sync_mmu()) {
        int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);

        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr,
                    "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
            exit(1);
        }
    }
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *env)
{
    return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUState *env;
    int err;
};

static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;
    CPUState *env = dbg_data->env;

    dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (env->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(env, &data.dbg);
    data.env = env;

    run_on_cpu(env, kvm_invoke_set_guest_debug, &data);
    return data.err;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp) {
            return -ENOMEM;
        }

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
                           bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;
    CPUState *env;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0) {
                    break;
                }
            }
        }
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        kvm_update_guest_debug(env, 0);
    }
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */
int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset)
{
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset) {
        return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);
    }

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = 8;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}
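/* Note (illustrative): sigmask->len is hardcoded to 8 because the kernel
 * expects the size of its own sigset_t, which is 8 bytes (64 signal bits)
 * on most Linux architectures, independent of glibc's much larger
 * userspace sigset_t.
 */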
int kvm_set_ioeventfd_mmio_long(int fd, uint32_t addr, uint32_t val, bool assign)
{
    int ret;
    struct kvm_ioeventfd iofd;

    iofd.datamatch = val;
    iofd.addr = addr;
    iofd.len = 4;
    iofd.flags = KVM_IOEVENTFD_FLAG_DATAMATCH;
    iofd.fd = fd;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}

int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
{
    struct kvm_ioeventfd kick = {
        .datamatch = val,
        .addr = addr,
        .len = 2,
        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
        .fd = fd,
    };
    int r;
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}

int kvm_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
    return kvm_arch_on_sigbus_vcpu(env, code, addr);
}

int kvm_on_sigbus(int code, void *addr)
{
    return kvm_arch_on_sigbus(code, addr);
}