/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-barrier.h"
#include "sysemu.h"
#include "hw/hw.h"
#include "gdbstub.h"
#include "kvm.h"

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct KVMSlot
{
    target_phys_addr_t start_addr;
    ram_addr_t memory_size;
    ram_addr_t phys_offset;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;

struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
#ifdef KVM_CAP_COALESCED_MMIO
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif
    int broken_set_mem_region;
    int migration_log;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int irqchip_in_kernel;
    int pit_in_kernel;
};

static KVMState *kvm_state;
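
/* Return the first slot in s->slots whose memory_size is 0, skipping
 * indices 8..11, which are left for KVM-internal use (see the "KVM private
 * memory slots" note below); aborts if every usable slot is taken. */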
static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        /* KVM private memory slots */
        if (i >= 8 && i < 12)
            continue;
        if (s->slots[i].memory_size == 0)
            return &s->slots[i];
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}
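
/* Find the slot that covers exactly [start_addr, end_addr), or NULL if no
 * slot matches both boundaries. */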
static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         target_phys_addr_t start_addr,
                                         target_phys_addr_t end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            target_phys_addr_t start_addr,
                                            target_phys_addr_t end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}
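
/* Push one slot's current state into the kernel. A memory_size of 0
 * deletes the slot, which is how callers below unregister regions. While
 * migration logging is active, dirty-page logging is forced on for every
 * slot that gets (re-)registered. */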
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)qemu_get_ram_ptr(slot->phys_offset);
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}

static void kvm_reset_vcpu(void *opaque)
{
    CPUState *env = opaque;

    kvm_arch_reset_vcpu(env);
}

int kvm_irqchip_in_kernel(void)
{
    return kvm_state->irqchip_in_kernel;
}

int kvm_pit_in_kernel(void)
{
    return kvm_state->pit_in_kernel;
}
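
/* Create the vcpu fd for env and mmap the shared kvm_run structure used to
 * communicate with KVM_RUN. The coalesced MMIO ring, when available, lives
 * inside the same vcpu mapping, at the page offset that
 * KVM_CHECK_EXTENSION(KVM_CAP_COALESCED_MMIO) reported at init time. */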
int kvm_init_vcpu(CPUState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;
    env->kvm_state = s;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

#ifdef KVM_CAP_COALESCED_MMIO
    if (s->coalesced_mmio && !s->coalesced_mmio_ring)
        s->coalesced_mmio_ring = (void *) env->kvm_run +
                                 s->coalesced_mmio * PAGE_SIZE;
#endif

    ret = kvm_arch_init_vcpu(env);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
        kvm_arch_reset_vcpu(env);
    }
err:
    return ret;
}

/*
 * dirty pages logging control
 */
static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      ram_addr_t size, int flags, int mask)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);
    int old_flags;

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (target_phys_addr_t)(phys_addr + size - 1));
        return -EINVAL;
    }

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | flags;
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}

int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size,
                                      KVM_MEM_LOG_DIRTY_PAGES,
                                      KVM_MEM_LOG_DIRTY_PAGES);
}

int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size,
                                      0,
                                      KVM_MEM_LOG_DIRTY_PAGES);
}

static int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        mem = &s->slots[i];

        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}
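
/* Test bit nr in a bitmap stored as an array of bytes, least significant
 * bit first; bit 0 of byte 0 corresponds to the first page of a slot. */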
static int test_le_bit(unsigned long nr, unsigned char *addr)
{
    return (addr[nr >> 3] >> (nr & 7)) & 1;
}

/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using
 * cpu_physical_memory_set_dirty(), so every page that KVM reports as dirty
 * is marked dirty for all memory clients at once.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
static int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                          target_phys_addr_t end_addr)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    target_phys_addr_t phys_addr;
    ram_addr_t addr;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        size = ((mem->memory_size >> TARGET_PAGE_BITS) + 7) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = qemu_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = qemu_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        for (phys_addr = mem->start_addr, addr = mem->phys_offset;
             phys_addr < mem->start_addr + mem->memory_size;
             phys_addr += TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
            unsigned char *bitmap = (unsigned char *)d.dirty_bitmap;
            unsigned nr = (phys_addr - mem->start_addr) >> TARGET_PAGE_BITS;

            if (test_le_bit(nr, bitmap)) {
                cpu_physical_memory_set_dirty(addr);
            }
        }
        start_addr = phys_addr;
    }
    qemu_free(d.dirty_bitmap);

    return ret;
}
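
/* Register/unregister a guest-physical range for coalesced MMIO. Writes to
 * such a range are queued by the kernel into the ring drained by
 * kvm_flush_coalesced_mmio_buffer() instead of causing an immediate exit.
 * Both calls return -ENOSYS when the capability is absent. */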
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
#endif

    return ret;
}

int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
#endif

    return ret;
}
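
/* Returns the non-negative value that KVM_CHECK_EXTENSION reports for
 * extension, or 0 on error; for plain boolean capabilities this is simply
 * available-or-not, but some extensions return a count or an offset. */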
int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}
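
/* Map a chunk of guest physical memory into a KVM slot. Unaligned RAM is
 * rejected. Overlapping slots are unregistered first; the parts of an old
 * slot that extend beyond the new region are re-registered as prefix and
 * suffix slots, and on hosts with the broken SET_USER_MEMORY_REGION
 * semantics the pre-existing fragmentation is preserved instead. Finally
 * the new region itself is registered, unless it is non-RAM memory that
 * the kernel does not need to know about. */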
static void kvm_set_phys_mem(target_phys_addr_t start_addr,
                             ram_addr_t size,
                             ram_addr_t phys_offset)
{
    KVMState *s = kvm_state;
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    KVMSlot *mem, old;
    int err;

    if (start_addr & ~TARGET_PAGE_MASK) {
        if (flags >= IO_MEM_UNASSIGNED) {
            if (!kvm_lookup_overlapping_slot(s, start_addr,
                                             start_addr + size)) {
                return;
            }
            fprintf(stderr, "Unaligned split of a KVM memory slot\n");
        } else {
            fprintf(stderr, "Only page-aligned memory slots supported\n");
        }
        abort();
    }

    /* KVM does not support read-only slots */
    phys_offset &= ~IO_MEM_ROM;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (flags < IO_MEM_UNASSIGNED && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (phys_offset - start_addr == mem->phys_offset - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - nothing to be done. */
            return;
        }

        old = *mem;

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, even not by
         * unregistering the previous ones and then registering the larger
         * slot. We have to maintain the existing fragmentation. Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size &&
            flags < IO_MEM_UNASSIGNED) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            phys_offset += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->phys_offset = old.phys_offset + size_delta;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size)
        return;

    /* KVM does not need to know about this memory */
    if (flags >= IO_MEM_UNASSIGNED)
        return;

    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->phys_offset = phys_offset;
    mem->flags = 0;

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}

static void kvm_client_set_memory(struct CPUPhysMemoryClient *client,
                                  target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    kvm_set_phys_mem(start_addr, size, phys_offset);
}

static int kvm_client_sync_dirty_bitmap(struct CPUPhysMemoryClient *client,
                                        target_phys_addr_t start_addr,
                                        target_phys_addr_t end_addr)
{
    return kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
}

static int kvm_client_migration_log(struct CPUPhysMemoryClient *client,
                                    int enable)
{
    return kvm_set_migration_log(enable);
}

static CPUPhysMemoryClient kvm_cpu_phys_memory_client = {
    .set_memory = kvm_client_set_memory,
    .sync_dirty_bitmap = kvm_client_sync_dirty_bitmap,
    .migration_log = kvm_client_migration_log,
};
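
/* One-time KVM initialization: open /dev/kvm, check the API version,
 * create the VM, verify the capabilities this code depends on, probe the
 * optional ones, and hook the memory client into the physical memory
 * layer so that slot updates and dirty logging follow RAM registration. */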
int kvm_init(int smp_cpus)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    int ret;
    int i;

    if (smp_cpus > 1) {
        fprintf(stderr, "No SMP KVM support, use '-smp 1'\n");
        return -EINVAL;
    }

    s = qemu_mallocz(sizeof(KVMState));

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++)
        s->slots[i].slot = i;

    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0)
            ret = -EINVAL;
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0) {
#ifdef TARGET_S390X
        fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
                        "your host kernel command line\n");
#endif
        ret = s->vmfd;
        goto err;
    }

    /* initially, KVM allocated its own memory and we had to jump through
     * hoops to make phys_ram_base point to this. Modern versions of KVM
     * just use a user allocated buffer so we can use regular pages
     * unmodified. Make sure we have a sufficiently modern version of KVM.
     */
    if (!kvm_check_extension(s, KVM_CAP_USER_MEMORY)) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support KVM_CAP_USER_MEMORY\n%s",
                upgrade_note);
        goto err;
    }

    /* There was a nasty bug in < kvm-80 that prevents memory slots from being
     * destroyed properly. Since we rely on this capability, refuse to work
     * with any kernel without this capability. */
    if (!kvm_check_extension(s, KVM_CAP_DESTROY_MEMORY_REGION_WORKS)) {
        ret = -EINVAL;

        fprintf(stderr,
                "KVM kernel module broken (DESTROY_MEMORY_REGION).\n%s",
                upgrade_note);
        goto err;
    }

    s->coalesced_mmio = 0;
#ifdef KVM_CAP_COALESCED_MMIO
    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
    s->coalesced_mmio_ring = NULL;
#endif

    s->broken_set_mem_region = 1;
#ifdef KVM_CAP_JOIN_MEMORY_REGIONS_WORKS
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }
#endif

    s->vcpu_events = 0;
#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep = 0;
#ifdef KVM_CAP_X86_ROBUST_SINGLESTEP
    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
#endif

    s->debugregs = 0;
#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

    ret = kvm_arch_init(s, smp_cpus);
    if (ret < 0)
        goto err;

    kvm_state = s;
    cpu_register_phys_memory_client(&kvm_cpu_phys_memory_client);

    return 0;

err:
    if (s) {
        if (s->vmfd != -1)
            close(s->vmfd);
        if (s->fd != -1)
            close(s->fd);
    }
    qemu_free(s);

    return ret;
}
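
/* Dispatch a KVM_EXIT_IO request: replay each element of the data buffer
 * as a 1-, 2-, or 4-byte port read or write against the emulated I/O
 * space. Always returns 1 so the caller re-enters the guest. */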
static int kvm_handle_io(uint16_t port, void *data, int direction, int size,
                         uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }

    return 1;
}

#ifdef KVM_CAP_INTERNAL_ERROR_DATA
static void kvm_handle_internal_error(CPUState *env, struct kvm_run *run)
{
    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int i;

        fprintf(stderr, "KVM internal error. Suberror: %d\n",
                run->internal.suberror);

        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        }
    }
    cpu_dump_state(env, stderr, fprintf, 0);
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    vm_stop(0);
}
#endif
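
/* Drain the coalesced MMIO ring shared with the kernel, replaying each
 * buffered write through the memory API. The write barrier ensures an
 * entry is fully consumed before ring->first advances, since the kernel
 * may reuse the slot as soon as it sees the new index. */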
void kvm_flush_coalesced_mmio_buffer(void)
{
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;
    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }
#endif
}
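
/* Register synchronization between QEMU and KVM: _state pulls the current
 * register state out of the kernel on demand and marks it dirty so it is
 * written back before the next KVM_RUN; the post_reset/post_init variants
 * push the reset or full state back immediately. */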
void kvm_cpu_synchronize_state(CPUState *env)
{
    if (!env->kvm_vcpu_dirty) {
        kvm_arch_get_registers(env);
        env->kvm_vcpu_dirty = 1;
    }
}

void kvm_cpu_synchronize_post_reset(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_RESET_STATE);
    env->kvm_vcpu_dirty = 0;
}

void kvm_cpu_synchronize_post_init(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_FULL_STATE);
    env->kvm_vcpu_dirty = 0;
}
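
/* The per-vcpu run loop. KVM_RUN is issued without the iothread lock held;
 * each exit reason is either handled here or passed to the architecture
 * code. A handler result > 0 re-enters the guest, 0 returns to the caller,
 * and unhandled run failures abort. */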
int kvm_cpu_exec(CPUState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret;

    DPRINTF("kvm_cpu_exec()\n");

    do {
#ifndef CONFIG_IOTHREAD
        if (env->exit_request) {
            DPRINTF("interrupt exit requested\n");
            ret = 0;
            break;
        }
#endif

        if (env->kvm_vcpu_dirty) {
            kvm_arch_put_registers(env, KVM_PUT_RUNTIME_STATE);
            env->kvm_vcpu_dirty = 0;
        }

        kvm_arch_pre_run(env, run);
        qemu_mutex_unlock_iothread();
        ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
        qemu_mutex_lock_iothread();
        kvm_arch_post_run(env, run);

        if (ret == -EINTR || ret == -EAGAIN) {
            cpu_exit(env);
            DPRINTF("io window exit\n");
            ret = 0;
            break;
        }

        if (ret < 0) {
            DPRINTF("kvm run failed %s\n", strerror(-ret));
            abort();
        }

        kvm_flush_coalesced_mmio_buffer();

        ret = 0; /* exit loop */
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            ret = kvm_handle_io(run->io.port,
                                (uint8_t *)run + run->io.data_offset,
                                run->io.direction,
                                run->io.size,
                                run->io.count);
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 1;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            ret = 1;
            break;
        case KVM_EXIT_UNKNOWN:
            DPRINTF("kvm_exit_unknown\n");
            break;
        case KVM_EXIT_FAIL_ENTRY:
            DPRINTF("kvm_exit_fail_entry\n");
            break;
        case KVM_EXIT_EXCEPTION:
            DPRINTF("kvm_exit_exception\n");
            break;
#ifdef KVM_CAP_INTERNAL_ERROR_DATA
        case KVM_EXIT_INTERNAL_ERROR:
            kvm_handle_internal_error(env, run);
            break;
#endif
        case KVM_EXIT_DEBUG:
            DPRINTF("kvm_exit_debug\n");
#ifdef KVM_CAP_SET_GUEST_DEBUG
            if (kvm_arch_debug(&run->debug.arch)) {
                gdb_set_stop_cpu(env);
                vm_stop(EXCP_DEBUG);
                env->exception_index = EXCP_DEBUG;
                return 0;
            }
            /* re-enter, this exception was guest-internal */
            ret = 1;
#endif /* KVM_CAP_SET_GUEST_DEBUG */
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret > 0);

    if (env->exit_request) {
        env->exit_request = 0;
        env->exception_index = EXCP_INTERRUPT;
    }

    return ret;
}
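
/* Thin ioctl wrappers for the three KVM fd levels (the /dev/kvm system fd,
 * the VM fd, and a per-vcpu fd), all converting the usual -1/errno failure
 * convention into a negative errno return value. */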
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->fd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}

int kvm_vcpu_ioctl(CPUState *env, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(env->kvm_fd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}

int kvm_has_sync_mmu(void)
{
#ifdef KVM_CAP_SYNC_MMU
    KVMState *s = kvm_state;

    return kvm_check_extension(s, KVM_CAP_SYNC_MMU);
#else
    return 0;
#endif
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

void kvm_setup_guest_memory(void *start, size_t size)
{
    if (!kvm_has_sync_mmu()) {
#ifdef MADV_DONTFORK
        int ret = madvise(start, size, MADV_DONTFORK);

        if (ret) {
            perror("madvise");
            exit(1);
        }
#else
        fprintf(stderr,
                "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
        exit(1);
#endif
    }
}
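
/* Guest debugging support (gdbstub integration). Software breakpoints are
 * tracked in a per-VM list so they can be reference counted and removed
 * across CPUs; the actual instruction patching and hardware breakpoint
 * handling is delegated to the kvm_arch_* hooks. When the capability is
 * missing, the stubs at the end of this section report -EINVAL. */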
#ifdef KVM_CAP_SET_GUEST_DEBUG
static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
{
#ifdef CONFIG_IOTHREAD
    if (env != cpu_single_env) {
        abort();
    }
#endif
    func(data);
}

struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc)
            return bp;
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *env)
{
    return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUState *env;
    int err;
};

static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;
    CPUState *env = dbg_data->env;

    dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (env->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(env, &data.dbg);
    data.env = env;

    on_vcpu(env, kvm_invoke_set_guest_debug, &data);
    return data.err;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = qemu_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp)
            return -ENOMEM;

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            qemu_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
                           bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (!bp)
            return -ENOENT;

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
        if (err)
            return err;

        QTAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
        qemu_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;
    CPUState *env;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0)
                    break;
            }
        }
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu)
        kvm_update_guest_debug(env, 0);
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */
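
/* Install the signal mask that the kernel atomically applies for the
 * duration of KVM_RUN. 'len' is the size of the kernel's sigset_t, 8 bytes
 * on Linux; the full glibc sigset is copied in, but the kernel only
 * consumes the first 'len' bytes. */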
int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset)
{
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset)
        return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);

    sigmask = qemu_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = 8;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
    qemu_free(sigmask);

    return r;
}
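
/* Assign (or deassign) an eventfd that the kernel signals directly when
 * the guest writes the 16-bit value val to I/O port addr, avoiding a
 * userspace exit for that access. Returns -ENOSYS when ioeventfd support
 * is unavailable. */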
int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
{
#ifdef KVM_IOEVENTFD
    struct kvm_ioeventfd kick = {
        .datamatch = val,
        .addr = addr,
        .len = 2,
        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
        .fd = fd,
    };
    int r;
    if (!kvm_enabled())
        return -ENOSYS;
    if (!assign)
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0)
        return r;
    return 0;
#else
    return -ENOSYS;
#endif
}