Revision a426e122
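Judging from the hunks below, this revision touches kvm-all.c for coding style only: braces are added around single-statement if/for bodies, long argument lists are re-wrapped, and tab indentation is replaced by spaces; no functional change appears to be intended. The recurring transformation is the brace rule from QEMU's CODING_STYLE, sketched here on a hypothetical helper (not a hunk from the patch itself):

    #include <errno.h>

    /* Hypothetical helper, for illustration only: converts the raw ioctl(2)
     * result into the -errno convention used throughout kvm-all.c. */
    static int wrap_ioctl_result(int ret)
    {
        /* Before the patch, single-statement bodies like this one had no
         * braces; the patch adds them, as QEMU's CODING_STYLE requires:
         *     if (ret == -1)
         *         ret = -errno;
         */
        if (ret == -1) {
            ret = -errno;
        }
        return ret;
    }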
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -88,10 +88,12 @@
 
     for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
         /* KVM private memory slots */
-        if (i >= 8 && i < 12)
+        if (i >= 8 && i < 12) {
             continue;
-        if (s->slots[i].memory_size == 0)
+        }
+        if (s->slots[i].memory_size == 0) {
             return &s->slots[i];
+        }
     }
 
     fprintf(stderr, "%s: no free slot available\n", __func__);
@@ -226,9 +228,10 @@
     }
 
 #ifdef KVM_CAP_COALESCED_MMIO
-    if (s->coalesced_mmio && !s->coalesced_mmio_ring)
-        s->coalesced_mmio_ring = (void *) env->kvm_run +
-                                 s->coalesced_mmio * PAGE_SIZE;
+    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
+        s->coalesced_mmio_ring =
+            (void *)env->kvm_run + s->coalesced_mmio * PAGE_SIZE;
+    }
 #endif
 
     ret = kvm_arch_init_vcpu(env);
@@ -275,16 +278,14 @@
 
 int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size)
 {
-    return kvm_dirty_pages_log_change(phys_addr, size,
-                                      KVM_MEM_LOG_DIRTY_PAGES,
-                                      KVM_MEM_LOG_DIRTY_PAGES);
+    return kvm_dirty_pages_log_change(phys_addr, size, KVM_MEM_LOG_DIRTY_PAGES,
+                                      KVM_MEM_LOG_DIRTY_PAGES);
 }
 
 int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size)
 {
-    return kvm_dirty_pages_log_change(phys_addr, size,
-                                      0,
-                                      KVM_MEM_LOG_DIRTY_PAGES);
+    return kvm_dirty_pages_log_change(phys_addr, size, 0,
+                                      KVM_MEM_LOG_DIRTY_PAGES);
 }
 
 static int kvm_set_migration_log(int enable)
@@ -356,7 +357,7 @@
  * @end_addr: end of logged region.
  */
 static int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
-					  target_phys_addr_t end_addr)
+                                          target_phys_addr_t end_addr)
 {
     KVMState *s = kvm_state;
     unsigned long size, allocated_size = 0;
@@ -480,9 +481,8 @@
 #endif
 }
 
-static void kvm_set_phys_mem(target_phys_addr_t start_addr,
-                             ram_addr_t size,
-                             ram_addr_t phys_offset)
+static void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size,
+                             ram_addr_t phys_offset)
 {
     KVMState *s = kvm_state;
     ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
@@ -589,13 +589,13 @@
     }
 
     /* in case the KVM bug workaround already "consumed" the new slot */
-    if (!size)
+    if (!size) {
         return;
-
+    }
     /* KVM does not need to know about this memory */
-    if (flags >= IO_MEM_UNASSIGNED)
+    if (flags >= IO_MEM_UNASSIGNED) {
         return;
-
+    }
     mem = kvm_alloc_slot(s);
     mem->memory_size = size;
     mem->start_addr = start_addr;
@@ -611,30 +611,29 @@
 }
 
 static void kvm_client_set_memory(struct CPUPhysMemoryClient *client,
-				  target_phys_addr_t start_addr,
-				  ram_addr_t size,
-				  ram_addr_t phys_offset)
+                                  target_phys_addr_t start_addr,
+                                  ram_addr_t size, ram_addr_t phys_offset)
 {
-	kvm_set_phys_mem(start_addr, size, phys_offset);
+    kvm_set_phys_mem(start_addr, size, phys_offset);
 }
 
 static int kvm_client_sync_dirty_bitmap(struct CPUPhysMemoryClient *client,
-					target_phys_addr_t start_addr,
-					target_phys_addr_t end_addr)
+                                        target_phys_addr_t start_addr,
+                                        target_phys_addr_t end_addr)
 {
-	return kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
+    return kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
 }
 
 static int kvm_client_migration_log(struct CPUPhysMemoryClient *client,
-				    int enable)
+                                    int enable)
 {
-	return kvm_set_migration_log(enable);
+    return kvm_set_migration_log(enable);
 }
 
 static CPUPhysMemoryClient kvm_cpu_phys_memory_client = {
-	.set_memory = kvm_client_set_memory,
-	.sync_dirty_bitmap = kvm_client_sync_dirty_bitmap,
-	.migration_log = kvm_client_migration_log,
+    .set_memory = kvm_client_set_memory,
+    .sync_dirty_bitmap = kvm_client_sync_dirty_bitmap,
+    .migration_log = kvm_client_migration_log,
 };
 
 int kvm_init(int smp_cpus)
@@ -651,9 +650,9 @@
 #ifdef KVM_CAP_SET_GUEST_DEBUG
     QTAILQ_INIT(&s->kvm_sw_breakpoints);
 #endif
-    for (i = 0; i < ARRAY_SIZE(s->slots); i++)
+    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
         s->slots[i].slot = i;
-
+    }
     s->vmfd = -1;
     s->fd = qemu_open("/dev/kvm", O_RDWR);
     if (s->fd == -1) {
@@ -664,8 +663,9 @@
 
     ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
     if (ret < KVM_API_VERSION) {
-        if (ret > 0)
+        if (ret > 0) {
             ret = -EINVAL;
+        }
         fprintf(stderr, "kvm version too old\n");
         goto err;
     }
@@ -750,8 +750,9 @@
 #endif
 
     ret = kvm_arch_init(s, smp_cpus);
-    if (ret < 0)
+    if (ret < 0) {
         goto err;
+    }
 
     kvm_state = s;
     cpu_register_phys_memory_client(&kvm_cpu_phys_memory_client);
@@ -762,10 +763,12 @@
 
 err:
     if (s) {
-        if (s->vmfd != -1)
+        if (s->vmfd != -1) {
             close(s->vmfd);
-        if (s->fd != -1)
+        }
+        if (s->fd != -1) {
             close(s->fd);
+        }
     }
     qemu_free(s);
 
@@ -829,8 +832,9 @@
     cpu_dump_state(env, stderr, fprintf, 0);
     if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
         fprintf(stderr, "emulation failure\n");
-        if (!kvm_arch_stop_on_emulation_error(env))
-		return;
+        if (!kvm_arch_stop_on_emulation_error(env)) {
+            return;
+        }
     }
     /* FIXME: Should trigger a qmp message to let management know
      * something went wrong.
@@ -870,8 +874,9 @@
 
 void kvm_cpu_synchronize_state(CPUState *env)
 {
-    if (!env->kvm_vcpu_dirty)
+    if (!env->kvm_vcpu_dirty) {
         run_on_cpu(env, do_kvm_cpu_synchronize_state, env);
+    }
 }
 
 void kvm_cpu_synchronize_post_reset(CPUState *env)
@@ -1011,9 +1016,9 @@
     va_end(ap);
 
     ret = ioctl(s->fd, type, arg);
-    if (ret == -1)
+    if (ret == -1) {
         ret = -errno;
-
+    }
     return ret;
 }
 
@@ -1028,9 +1033,9 @@
     va_end(ap);
 
     ret = ioctl(s->vmfd, type, arg);
-    if (ret == -1)
+    if (ret == -1) {
         ret = -errno;
-
+    }
     return ret;
 }
 
@@ -1045,9 +1050,9 @@
     va_end(ap);
 
     ret = ioctl(env->kvm_fd, type, arg);
-    if (ret == -1)
+    if (ret == -1) {
         ret = -errno;
-
+    }
    return ret;
 }
 
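The three ioctl wrappers above (kvm_ioctl, kvm_vm_ioctl, kvm_vcpu_ioctl) share one convention: a raw ioctl(2) failure of -1 is converted to -errno, so callers only have to test for a negative return. A hedged caller sketch (fragment only; assumes a valid KVMState *s as set up by kvm_init(), and uses the KVM_CHECK_EXTENSION query that kvm-all.c issues elsewhere):

    int ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
    if (ret < 0) {
        fprintf(stderr, "KVM_CHECK_EXTENSION failed: %s\n", strerror(-ret));
    } else if (ret == 0) {
        /* extension not available on this kernel */
    }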
@@ -1116,8 +1121,9 @@
     struct kvm_sw_breakpoint *bp;
 
     QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
-        if (bp->pc == pc)
+        if (bp->pc == pc) {
             return bp;
+        }
     }
     return NULL;
 }
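kvm_find_sw_breakpoint() above walks the per-VM software-breakpoint list, which kvm-all.c keeps as a QTAILQ (QEMU's intrusive tail-queue macros from qemu-queue.h; QTAILQ_INIT, QTAILQ_INSERT_HEAD and QTAILQ_REMOVE appear in the other breakpoint hunks). A minimal standalone sketch of the same pattern, with hypothetical names, assuming the QTAILQ macros behave like the BSD TAILQ family:

    #include "qemu-queue.h"          /* QTAILQ_* macros used by kvm-all.c */

    /* Hypothetical stand-in for struct kvm_sw_breakpoint */
    struct sw_bp {
        unsigned long pc;
        QTAILQ_ENTRY(sw_bp) entry;   /* links the element into the list */
    };

    static QTAILQ_HEAD(bp_head, sw_bp) bp_list = QTAILQ_HEAD_INITIALIZER(bp_list);

    static struct sw_bp *find_bp(unsigned long pc)
    {
        struct sw_bp *bp;

        QTAILQ_FOREACH(bp, &bp_list, entry) {
            if (bp->pc == pc) {
                return bp;
            }
        }
        return NULL;
    }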
@@ -1172,8 +1178,9 @@
         }
 
         bp = qemu_malloc(sizeof(struct kvm_sw_breakpoint));
-        if (!bp)
+        if (!bp) {
             return -ENOMEM;
+        }
 
         bp->pc = addr;
         bp->use_count = 1;
@@ -1187,14 +1194,16 @@
                           bp, entry);
     } else {
         err = kvm_arch_insert_hw_breakpoint(addr, len, type);
-        if (err)
+        if (err) {
             return err;
+        }
     }
 
     for (env = first_cpu; env != NULL; env = env->next_cpu) {
         err = kvm_update_guest_debug(env, 0);
-        if (err)
+        if (err) {
             return err;
+        }
     }
     return 0;
 }
@@ -1208,8 +1217,9 @@
 
     if (type == GDB_BREAKPOINT_SW) {
         bp = kvm_find_sw_breakpoint(current_env, addr);
-        if (!bp)
+        if (!bp) {
             return -ENOENT;
+        }
 
         if (bp->use_count > 1) {
             bp->use_count--;
@@ -1217,21 +1227,24 @@
         }
 
         err = kvm_arch_remove_sw_breakpoint(current_env, bp);
-        if (err)
+        if (err) {
             return err;
+        }
 
         QTAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
         qemu_free(bp);
     } else {
         err = kvm_arch_remove_hw_breakpoint(addr, len, type);
-        if (err)
+        if (err) {
             return err;
+        }
     }
 
     for (env = first_cpu; env != NULL; env = env->next_cpu) {
         err = kvm_update_guest_debug(env, 0);
-        if (err)
+        if (err) {
             return err;
+        }
     }
     return 0;
 }
@@ -1246,15 +1259,17 @@
         if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
             /* Try harder to find a CPU that currently sees the breakpoint. */
             for (env = first_cpu; env != NULL; env = env->next_cpu) {
-                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0)
+                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0) {
                     break;
+                }
             }
         }
     }
     kvm_arch_remove_all_hw_breakpoints();
 
-    for (env = first_cpu; env != NULL; env = env->next_cpu)
+    for (env = first_cpu; env != NULL; env = env->next_cpu) {
         kvm_update_guest_debug(env, 0);
+    }
 }
 
 #else /* !KVM_CAP_SET_GUEST_DEBUG */
@@ -1286,8 +1301,9 @@
     struct kvm_signal_mask *sigmask;
     int r;
 
-    if (!sigset)
+    if (!sigset) {
         return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);
+    }
 
     sigmask = qemu_malloc(sizeof(*sigmask) + sizeof(*sigset));
 
@@ -1342,13 +1358,16 @@
         .fd = fd,
     };
     int r;
-    if (!kvm_enabled())
+    if (!kvm_enabled()) {
         return -ENOSYS;
-    if (!assign)
+    }
+    if (!assign) {
         kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
+    }
     r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
-    if (r < 0)
+    if (r < 0) {
         return r;
+    }
     return 0;
 #else
     return -ENOSYS;