/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "sysemu/sysemu.h"
#include "hw/hw.h"
#include "hw/pci/msi.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qemu/event_notifier.h"
#include "trace.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

#ifdef CONFIG_VALGRIND_H
#include <valgrind/memcheck.h>
#endif

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define KVM_MSI_HASHTAB_SIZE    256

typedef struct KVMSlot
{
    hwaddr start_addr;
    ram_addr_t memory_size;
    void *ram;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;

struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    bool coalesced_flush_in_progress;
    int broken_set_mem_region;
    int migration_log;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int pit_state2;
    int xsave, xcrs;
    int many_ioeventfds;
    int intx_set_mask;
    /* The man page (and posix) say ioctl numbers are signed int, but
     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
     * unsigned, and treating them as signed here can break things */
    unsigned irq_set_ioctl;
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *irq_routes;
    int nr_allocated_irq_routes;
    uint32_t *used_gsi_bitmap;
    unsigned int gsi_count;
    QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
    bool direct_msi;
#endif
};

KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_irqfds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_allowed;
bool kvm_readonly_mem_allowed;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_LAST_INFO
};

static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        if (s->slots[i].memory_size == 0) {
            return &s->slots[i];
        }
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         hwaddr start_addr,
                                         hwaddr end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            hwaddr start_addr,
                                            hwaddr end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            return 1;
        }
    }

    return 0;
}

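/* Push one slot's configuration to the kernel.  Dirty logging is forced on
 * while migration logging is active, and a populated read-only slot is
 * deleted (size 0) first, since KVM cannot resize such a slot in place
 * (see the commit reference in the comment below). */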
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }

    if (slot->memory_size && mem.flags & KVM_MEM_READONLY) {
        /* Set the slot size to 0 before setting the slot to the desired
         * value. This is needed based on KVM commit 75d61fbc. */
        mem.memory_size = 0;
        kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    }
    mem.memory_size = slot->memory_size;
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}

static void kvm_reset_vcpu(void *opaque)
{
    CPUState *cpu = opaque;

    kvm_arch_reset_vcpu(cpu);
}

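/* Create a vcpu in the kernel, mmap its shared kvm_run area, and let the
 * architecture-specific code finish the setup. */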
int kvm_init_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)kvm_arch_vcpu_id(cpu));
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    cpu->kvm_fd = ret;
    cpu->kvm_state = s;
    cpu->kvm_vcpu_dirty = true;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        cpu->kvm_fd, 0);
    if (cpu->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(cpu);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, cpu);
        kvm_arch_reset_vcpu(cpu);
    }
err:
    return ret;
}

/*
 * dirty pages logging control
 */

static int kvm_mem_flags(KVMState *s, bool log_dirty, bool readonly)
{
    int flags = 0;
    flags = log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
    if (readonly && kvm_readonly_mem_allowed) {
        flags |= KVM_MEM_READONLY;
    }
    return flags;
}

static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
{
    KVMState *s = kvm_state;
    int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
    int old_flags;

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty, false);
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }

    if (flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}

static int kvm_dirty_pages_log_change(hwaddr phys_addr,
                                      ram_addr_t size, bool log_dirty)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (hwaddr)(phys_addr + size - 1));
        return -EINVAL;
    }
    return kvm_slot_dirty_pages_log_change(mem, log_dirty);
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   section->size, true);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   section->size, false);
    if (r < 0) {
        abort();
    }
}

static int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        mem = &s->slots[i];

        if (!mem->memory_size) {
            continue;
        }
        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}

/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                         unsigned long *bitmap)
{
    unsigned int i, j;
    unsigned long page_number, c;
    hwaddr addr, addr1;
    unsigned int len = ((section->size / getpagesize()) + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        if (bitmap[i] != 0) {
            c = leul_to_cpu(bitmap[i]);
            do {
                j = ffsl(c) - 1;
                c &= ~(1ul << j);
                page_number = (i * HOST_LONG_BITS + j) * hpratio;
                addr1 = page_number * TARGET_PAGE_SIZE;
                addr = section->offset_within_region + addr1;
                memory_region_set_dirty(section->mr, addr,
                                        TARGET_PAGE_SIZE * hpratio);
            } while (c != 0);
        }
    }
    return 0;
}

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))

/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using
 * memory_region_set_dirty().  This means all bits are set
 * to dirty.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;
    hwaddr start_addr = section->offset_within_address_space;
    hwaddr end_addr = start_addr + section->size;

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        /* XXX bad kernel interface alert
         * For dirty bitmap, kernel allocates array of size aligned to
         * bits-per-long.  But for case when the kernel is 64bits and
         * the userspace is 32bits, userspace can't align to the same
         * bits-per-long, since sizeof(long) is different between kernel
         * and user space.  This way, userspace will provide buffer which
         * may be 4 bytes less than the kernel will use, resulting in
         * userspace memory corruption (which is not detectable by valgrind
         * too, in most cases).
         * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
         * a hope that sizeof(long) won't become >8 any time soon.
         */
        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
                     /*HOST_LONG_BITS*/ 64) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = g_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = g_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
        start_addr = mem->start_addr + mem->memory_size;
    }
    g_free(d.dirty_bitmap);

    return ret;
}

static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *section,
                                     hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
                                       MemoryRegionSection *section,
                                       hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

static int kvm_set_ioeventfd_mmio(int fd, uint32_t addr, uint32_t val,
                                  bool assign, uint32_t size, bool datamatch)
{
    int ret;
    struct kvm_ioeventfd iofd;

    iofd.datamatch = datamatch ? val : 0;
    iofd.addr = addr;
    iofd.len = size;
    iofd.flags = 0;
    iofd.fd = fd;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (datamatch) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
                                 bool assign, uint32_t size, bool datamatch)
{
    struct kvm_ioeventfd kick = {
        .datamatch = datamatch ? val : 0,
        .addr = addr,
        .flags = KVM_IOEVENTFD_FLAG_PIO,
        .len = size,
        .fd = fd,
    };
    int r;
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (datamatch) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}


static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification.  This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD)
    int ioeventfds[7];
    int i, ret = 0;
    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}

static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}

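/* Register (add=true) or delete (add=false) the KVM slots backing a memory
 * section.  The range is first aligned to the target page size; existing
 * overlapping slots are torn down and re-registered as prefix and suffix
 * slots around the new range. */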
static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
{
    KVMState *s = kvm_state;
    KVMSlot *mem, old;
    int err;
    MemoryRegion *mr = section->mr;
    bool log_dirty = memory_region_is_logging(mr);
    bool writeable = !mr->readonly && !mr->rom_device;
    bool readonly_flag = mr->readonly || memory_region_is_romd(mr);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    void *ram = NULL;
    unsigned delta;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. */
    delta = TARGET_PAGE_ALIGN(size) - size;
    if (delta > size) {
        return;
    }
    start_addr += delta;
    size -= delta;
    size &= TARGET_PAGE_MASK;
    if (!size || (start_addr & ~TARGET_PAGE_MASK)) {
        return;
    }

    if (!memory_region_is_ram(mr)) {
        if (writeable || !kvm_readonly_mem_allowed) {
            return;
        } else if (!mr->romd_mode) {
            /* If the memory device is not in romd_mode, then we actually want
             * to remove the kvm memory slot so all accesses will trap. */
            add = false;
        }
    }

    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (add && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (ram - start_addr == mem->ram - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - update flags and done. */
            kvm_slot_dirty_pages_log_change(mem, log_dirty);
            return;
        }

        old = *mem;

        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_physical_sync_dirty_bitmap(section);
        }

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, even not by
         * unregistering the previous ones and then registering the larger
         * slot. We have to maintain the existing fragmentation. Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size && add) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            ram += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
#ifdef TARGET_PPC
                fprintf(stderr, "%s: This is probably because your kernel's " \
                                "PAGE_SIZE is too big. Please try to use 4k " \
                                "PAGE_SIZE!\n", __func__);
#endif
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->ram = old.ram + size_delta;
            mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size) {
        return;
    }
    if (!add) {
        return;
    }
    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->ram = ram;
    mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}

static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, true);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, false);
}

static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_physical_sync_dirty_bitmap(section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_global_start(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(1);
    assert(r >= 0);
}

static void kvm_log_global_stop(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(0);
    assert(r >= 0);
}

static void kvm_mem_ioeventfd_add(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, true, section->size, match_data);
    if (r < 0) {
        abort();
    }
}

static void kvm_mem_ioeventfd_del(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, false, section->size, match_data);
    if (r < 0) {
        abort();
    }
}

static void kvm_io_ioeventfd_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, true, section->size, match_data);
    if (r < 0) {
        abort();
    }
}

static void kvm_io_ioeventfd_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, false, section->size, match_data);
    if (r < 0) {
        abort();
    }
}

static MemoryListener kvm_memory_listener = {
    .region_add = kvm_region_add,
    .region_del = kvm_region_del,
    .log_start = kvm_log_start,
    .log_stop = kvm_log_stop,
    .log_sync = kvm_log_sync,
    .log_global_start = kvm_log_global_start,
    .log_global_stop = kvm_log_global_stop,
    .eventfd_add = kvm_mem_ioeventfd_add,
    .eventfd_del = kvm_mem_ioeventfd_del,
    .coalesced_mmio_add = kvm_coalesce_mmio_region,
    .coalesced_mmio_del = kvm_uncoalesce_mmio_region,
    .priority = 10,
};

static MemoryListener kvm_io_listener = {
    .eventfd_add = kvm_io_ioeventfd_add,
    .eventfd_del = kvm_io_ioeventfd_del,
    .priority = 10,
};

static void kvm_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

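/* Set the level of an in-kernel irqchip line.  Returns the kernel's
 * delivery status where KVM_IRQ_LINE_STATUS is available, 1 otherwise. */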
int kvm_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_async_interrupts_enabled());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irq");
        abort();
    }

    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}

#ifdef KVM_CAP_IRQ_ROUTING
typedef struct KVMMSIRoute {
    struct kvm_irq_routing_entry kroute;
    QTAILQ_ENTRY(KVMMSIRoute) entry;
} KVMMSIRoute;

static void set_gsi(KVMState *s, unsigned int gsi)
{
    s->used_gsi_bitmap[gsi / 32] |= 1U << (gsi % 32);
}

static void clear_gsi(KVMState *s, unsigned int gsi)
{
    s->used_gsi_bitmap[gsi / 32] &= ~(1U << (gsi % 32));
}

static void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count, i;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING);
    if (gsi_count > 0) {
        unsigned int gsi_bits, i;

        /* Round up so we can search ints using ffs */
        gsi_bits = ALIGN(gsi_count, 32);
        s->used_gsi_bitmap = g_malloc0(gsi_bits / 8);
        s->gsi_count = gsi_count;

        /* Mark any over-allocated bits as already in use */
        for (i = gsi_count; i < gsi_bits; i++) {
            set_gsi(s, i);
        }
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    if (!s->direct_msi) {
        for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
            QTAILQ_INIT(&s->msi_hashtab[i]);
        }
    }

    kvm_arch_init_irq_routing(s);
}

static void kvm_irqchip_commit_routes(KVMState *s)
{
    int ret;

    s->irq_routes->flags = 0;
    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
    assert(ret == 0);
}

static void kvm_add_routing_entry(KVMState *s,
                                  struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];
    memset(new, 0, sizeof(*new));
    new->gsi = entry->gsi;
    new->type = entry->type;
    new->flags = entry->flags;
    new->u = entry->u;

    set_gsi(s, entry->gsi);

    kvm_irqchip_commit_routes(s);
}

static int kvm_update_routing_entry(KVMState *s,
                                    struct kvm_irq_routing_entry *new_entry)
{
    struct kvm_irq_routing_entry *entry;
    int n;

    for (n = 0; n < s->irq_routes->nr; n++) {
        entry = &s->irq_routes->entries[n];
        if (entry->gsi != new_entry->gsi) {
            continue;
        }

        entry->type = new_entry->type;
        entry->flags = new_entry->flags;
        entry->u = new_entry->u;

        kvm_irqchip_commit_routes(s);

        return 0;
    }

    return -ESRCH;
}

void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e;

    assert(pin < s->gsi_count);

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
    struct kvm_irq_routing_entry *e;
    int i;

    for (i = 0; i < s->irq_routes->nr; i++) {
        e = &s->irq_routes->entries[i];
        if (e->gsi == virq) {
            s->irq_routes->nr--;
            *e = s->irq_routes->entries[s->irq_routes->nr];
        }
    }
    clear_gsi(s, virq);
}

static unsigned int kvm_hash_msi(uint32_t data)
{
    /* This is optimized for IA32 MSI layout. However, no other arch shall
     * repeat the mistake of not providing a direct MSI injection API. */
    return data & 0xff;
}

static void kvm_flush_dynamic_msi_routes(KVMState *s)
{
    KVMMSIRoute *route, *next;
    unsigned int hash;

    for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
        QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
            kvm_irqchip_release_virq(s, route->kroute.gsi);
            QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
            g_free(route);
        }
    }
}

static int kvm_irqchip_get_virq(KVMState *s)
{
    uint32_t *word = s->used_gsi_bitmap;
    int max_words = ALIGN(s->gsi_count, 32) / 32;
    int i, bit;
    bool retry = true;

again:
    /* Return the lowest unused GSI in the bitmap */
    for (i = 0; i < max_words; i++) {
        bit = ffs(~word[i]);
        if (!bit) {
            continue;
        }

        return bit - 1 + i * 32;
    }
    if (!s->direct_msi && retry) {
        retry = false;
        kvm_flush_dynamic_msi_routes(s);
        goto again;
    }
    return -ENOSPC;
}

static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
{
    unsigned int hash = kvm_hash_msi(msg.data);
    KVMMSIRoute *route;

    QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
        if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
            route->kroute.u.msi.address_hi == (msg.address >> 32) &&
            route->kroute.u.msi.data == msg.data) {
            return route;
        }
    }
    return NULL;
}

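/* Deliver an MSI: directly via KVM_SIGNAL_MSI when the kernel supports it,
 * otherwise through a dynamically allocated (and cached) GSI route that is
 * then triggered like an ordinary interrupt line. */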
int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    struct kvm_msi msi;
    KVMMSIRoute *route;

    if (s->direct_msi) {
        msi.address_lo = (uint32_t)msg.address;
        msi.address_hi = msg.address >> 32;
        msi.data = msg.data;
        msi.flags = 0;
        memset(msi.pad, 0, sizeof(msi.pad));

        return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
    }

    route = kvm_lookup_msi_route(s, msg);
    if (!route) {
        int virq;

        virq = kvm_irqchip_get_virq(s);
        if (virq < 0) {
            return virq;
        }

        route = g_malloc(sizeof(KVMMSIRoute));
        route->kroute.gsi = virq;
        route->kroute.type = KVM_IRQ_ROUTING_MSI;
        route->kroute.flags = 0;
        route->kroute.u.msi.address_lo = (uint32_t)msg.address;
        route->kroute.u.msi.address_hi = msg.address >> 32;
        route->kroute.u.msi.data = msg.data;

        kvm_add_routing_entry(s, &route->kroute);

        QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
                           entry);
    }

    assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);

    return kvm_set_irq(s, route->kroute.gsi, 1);
}

int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
{
    struct kvm_irq_routing_entry kroute;
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = msg.data;

    kvm_add_routing_entry(s, &kroute);

    return virq;
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
{
    struct kvm_irq_routing_entry kroute;

    if (!kvm_irqchip_in_kernel()) {
        return -ENOSYS;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = msg.data;

    return kvm_update_routing_entry(s, &kroute);
}

static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
{
    struct kvm_irqfd irqfd = {
        .fd = fd,
        .gsi = virq,
        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!kvm_irqfds_enabled()) {
        return -ENOSYS;
    }

    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
}

#else /* !KVM_CAP_IRQ_ROUTING */

static void kvm_init_irq_routing(KVMState *s)
{
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    abort();
}

int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
{
    return -ENOSYS;
}

static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
{
    abort();
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
{
    return -ENOSYS;
}
#endif /* !KVM_CAP_IRQ_ROUTING */

int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, int virq)
{
    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), virq, true);
}

int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, int virq)
{
    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), virq, false);
}

static int kvm_irqchip_create(KVMState *s)
{
    QemuOptsList *list = qemu_find_opts("machine");
    int ret;

    if (QTAILQ_EMPTY(&list->head) ||
        !qemu_opt_get_bool(QTAILQ_FIRST(&list->head),
                           "kernel_irqchip", true) ||
        !kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
    if (ret < 0) {
        fprintf(stderr, "Create kernel irqchip failed\n");
        return ret;
    }

    kvm_kernel_irqchip = true;
    /* If we have an in-kernel IRQ chip then we must have asynchronous
     * interrupt delivery (though the reverse is not necessarily true)
     */
    kvm_async_interrupts_allowed = true;

    kvm_init_irq_routing(s);

    return 0;
}

static int kvm_max_vcpus(KVMState *s)
{
    int ret;

    /* Find number of supported CPUs using the recommended
     * procedure from the kernel API documentation to cope with
     * older kernels that may be missing capabilities.
     */
    ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
    if (ret) {
        return ret;
    }
    ret = kvm_check_extension(s, KVM_CAP_NR_VCPUS);
    if (ret) {
        return ret;
    }

    return 4;
}

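/* One-time accelerator setup: open /dev/kvm, create the VM, probe the
 * capabilities used throughout this file, and register the memory
 * listeners. */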
int kvm_init(void)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int i;
    int max_vcpus;

    s = g_malloc0(sizeof(KVMState));

    /*
     * On systems where the kernel can support different base page
     * sizes, host page size may be different from TARGET_PAGE_SIZE,
     * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
     * page size for the system though.
     */
    assert(TARGET_PAGE_SIZE <= getpagesize());

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        s->slots[i].slot = i;
    }
    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    max_vcpus = kvm_max_vcpus(s);
    if (smp_cpus > max_vcpus) {
        ret = -EINVAL;
        fprintf(stderr, "Number of SMP cpus requested (%d) exceeds max cpus "
                "supported by KVM (%d)\n", smp_cpus, max_vcpus);
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0) {
#ifdef TARGET_S390X
        fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
                        "your host kernel command line\n");
#endif
        ret = s->vmfd;
        goto err;
    }

    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);

    s->broken_set_mem_region = 1;
    ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);

#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

#ifdef KVM_CAP_XSAVE
    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

#ifdef KVM_CAP_XCRS
    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

#ifdef KVM_CAP_PIT_STATE2
    s->pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
#endif

#ifdef KVM_CAP_IRQ_ROUTING
    s->direct_msi = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
#endif

    s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);

    s->irq_set_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
    }

#ifdef KVM_CAP_READONLY_MEM
    kvm_readonly_mem_allowed =
        (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
#endif

    ret = kvm_arch_init(s);
    if (ret < 0) {
        goto err;
    }

    ret = kvm_irqchip_create(s);
    if (ret < 0) {
        goto err;
    }

    kvm_state = s;
    memory_listener_register(&kvm_memory_listener, &address_space_memory);
    memory_listener_register(&kvm_io_listener, &address_space_io);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    cpu_interrupt_handler = kvm_handle_interrupt;

    return 0;

err:
    if (s->vmfd >= 0) {
        close(s->vmfd);
    }
    if (s->fd != -1) {
        close(s->fd);
    }
    g_free(s);

    return ret;
}

static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
                          uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }
}

static int kvm_handle_internal_error(CPUArchState *env, struct kvm_run *run)
{
    CPUState *cpu = ENV_GET_CPU(env);

    fprintf(stderr, "KVM internal error.");
    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int i;

        fprintf(stderr, " Suberror: %d\n", run->internal.suberror);
        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        }
    } else {
        fprintf(stderr, "\n");
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(cpu)) {
            cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}

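/* Replay MMIO writes that the kernel batched in the coalesced-MMIO ring
 * into QEMU's memory model.  The in-progress flag guards against
 * reentrancy. */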
void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}

static void do_kvm_cpu_synchronize_state(void *arg)
{
    CPUState *cpu = arg;

    if (!cpu->kvm_vcpu_dirty) {
        kvm_arch_get_registers(cpu);
        cpu->kvm_vcpu_dirty = true;
    }
}

void kvm_cpu_synchronize_state(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    if (!cpu->kvm_vcpu_dirty) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, cpu);
    }
}

void kvm_cpu_synchronize_post_reset(CPUState *cpu)
{
    kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
    cpu->kvm_vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_init(CPUState *cpu)
{
    kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
    cpu->kvm_vcpu_dirty = false;
}

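/* The per-vcpu run loop: write back dirty register state, call KVM_RUN
 * without the iothread lock held, and dispatch on the exit reason until an
 * exit requires returning to the main loop. */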
int kvm_cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    struct kvm_run *run = cpu->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(cpu)) {
        cpu->exit_request = 0;
        return EXCP_HLT;
    }

    do {
        if (cpu->kvm_vcpu_dirty) {
            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
            cpu->kvm_vcpu_dirty = false;
        }

        kvm_arch_pre_run(cpu, run);
        if (cpu->exit_request) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            qemu_cpu_kick_self();
        }
        qemu_mutex_unlock_iothread();

        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);

        qemu_mutex_lock_iothread();
        kvm_arch_post_run(cpu, run);

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                ret = EXCP_INTERRUPT;
                break;
            }
            fprintf(stderr, "error: kvm run failed %s\n",
                    strerror(-run_ret));
            abort();
        }

        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            kvm_handle_io(run->io.port,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(env, run);
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(cpu, run);
            break;
        }
    } while (ret == 0);

    if (ret < 0) {
        cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    cpu->exit_request = 0;
    return ret;
}

int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_ioctl(type, arg);
    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vm_ioctl(type, arg);
    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
    ret = ioctl(cpu->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_has_sync_mmu(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_has_xsave(void)
{
    return kvm_state->xsave;
}

int kvm_has_xcrs(void)
{
    return kvm_state->xcrs;
}

int kvm_has_pit_state2(void)
{
    return kvm_state->pit_state2;
}

int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}

int kvm_has_gsi_routing(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return false;
#endif
}

int kvm_has_intx_set_mask(void)
{
    return kvm_state->intx_set_mask;
}

void *kvm_ram_alloc(ram_addr_t size)
{
#ifdef TARGET_S390X
    void *mem;

    mem = kvm_arch_ram_alloc(size);
    if (mem) {
        return mem;
    }
#endif
    return qemu_anon_ram_alloc(size);
}

void kvm_setup_guest_memory(void *start, size_t size)
{
#ifdef CONFIG_VALGRIND_H
    VALGRIND_MAKE_MEM_DEFINED(start, size);
#endif
    if (!kvm_has_sync_mmu()) {
        int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);

        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr,
                    "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
            exit(1);
        }
    }
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *cpu)
{
    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUState *cpu;
    int err;
};

static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;

    dbg_data->err = kvm_vcpu_ioctl(dbg_data->cpu, KVM_SET_GUEST_DEBUG,
                                   &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
{
    CPUState *cpu = ENV_GET_CPU(env);
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (env->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(cpu, &data.dbg);
    data.cpu = cpu;

    run_on_cpu(cpu, kvm_invoke_set_guest_debug, &data);
    return data.err;
}

int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    CPUState *current_cpu = ENV_GET_CPU(current_env);
    struct kvm_sw_breakpoint *bp;
    CPUArchState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_cpu, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp) {
            return -ENOMEM;
        }

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_cpu, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&current_cpu->kvm_state->kvm_sw_breakpoints,
                           bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    CPUState *current_cpu = ENV_GET_CPU(current_env);
    struct kvm_sw_breakpoint *bp;
    CPUArchState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_cpu, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_cpu, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&current_cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUArchState *current_env)
{
    CPUState *current_cpu = ENV_GET_CPU(current_env);
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_cpu->kvm_state;
    CPUArchState *env;
    CPUState *cpu;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_cpu, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                cpu = ENV_GET_CPU(env);
                if (kvm_arch_remove_sw_breakpoint(cpu, bp) == 0) {
                    break;
                }
            }
        }
        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        kvm_update_guest_debug(env, 0);
    }
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUArchState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_set_signal_mask(CPUArchState *env, const sigset_t *sigset)
{
    CPUState *cpu = ENV_GET_CPU(env);
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset) {
        return kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, NULL);
    }

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = 8;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}

int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
    return kvm_arch_on_sigbus_vcpu(cpu, code, addr);
}

int kvm_on_sigbus(int code, void *addr)
{
    return kvm_arch_on_sigbus(code, addr);
}