/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/utsname.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "kvm.h"
#include "cpu.h"
#include "gdbstub.h"
#include "host-utils.h"
#include "hw/pc.h"
#include "hw/apic.h"
#include "ioport.h"
#include "hyperv.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_deadline;
static bool has_msr_async_pf_en;
static bool has_msr_misc_enable;
static int lm_capable_kernel;

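/*
 * Ask KVM_GET_SUPPORTED_CPUID for up to 'max' entries.  Returns NULL if
 * the buffer was too small (the caller retries with a bigger one) and
 * terminates QEMU on any other error.
 */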
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = (struct kvm_cpuid2 *)g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
    { -1, -1 }
};

static int get_para_features(KVMState *s)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
        if (kvm_check_extension(s, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }

    return features;
}

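/*
 * Return the host/KVM-supported bits of one CPUID leaf register, adding
 * back bits that older kernels are known to misreport and falling back
 * to the KVM_CAP_* probes for the paravirt feature leaf.
 */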
uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    int i, max;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;
    int has_kvm_features = 0;

    max = 1;
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            if (cpuid->entries[i].function == KVM_CPUID_FEATURES) {
                has_kvm_features = 1;
            }
            switch (reg) {
            case R_EAX:
                ret = cpuid->entries[i].eax;
                break;
            case R_EBX:
                ret = cpuid->entries[i].ebx;
                break;
            case R_ECX:
                ret = cpuid->entries[i].ecx;
                break;
            case R_EDX:
                ret = cpuid->entries[i].edx;
                switch (function) {
                case 1:
                    /* KVM before 2.6.30 misreports the following features */
                    ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
                    break;
                case 0x80000001:
                    /* On Intel, kvm returns cpuid according to the Intel spec,
                     * so add missing bits according to the AMD spec:
                     */
                    cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
                    ret |= cpuid_1_edx & 0x183f7ff;
                    break;
                }
                break;
            }
        }
    }

    g_free(cpuid);

    /* fallback for older kernels */
    if (!has_kvm_features && (function == KVM_CPUID_FEATURES)) {
        ret = get_para_features(s);
    }

    return ret;
}

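/*
 * List of guest pages that were reported as hardware-poisoned.  They are
 * remapped, and thereby made usable again, on the next guest reset (see
 * kvm_unpoison_all, registered as a reset handler in kvm_arch_init).
 */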
typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_malloc(sizeof(HWPoisonPage));
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}

static void kvm_mce_inject(CPUState *env, target_phys_addr_t paddr, int code)
{
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }
    cpu_x86_inject_mce(NULL, env, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc,
                       cpu_x86_support_mca_broadcast(env) ?
                       MCE_INJECT_BROADCAST : 0);
}

static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}

250 |
{ |
251 |
ram_addr_t ram_addr; |
252 |
target_phys_addr_t paddr; |
253 |
|
254 |
if ((env->mcg_cap & MCG_SER_P) && addr
|
255 |
&& (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) { |
256 |
if (qemu_ram_addr_from_host(addr, &ram_addr) ||
|
257 |
!kvm_physical_memory_addr_from_host(env->kvm_state, addr, &paddr)) { |
258 |
fprintf(stderr, "Hardware memory error for memory used by "
|
259 |
"QEMU itself instead of guest system!\n");
|
260 |
/* Hope we are lucky for AO MCE */
|
261 |
if (code == BUS_MCEERR_AO) {
|
262 |
return 0; |
263 |
} else {
|
264 |
hardware_memory_error(); |
265 |
} |
266 |
} |
267 |
kvm_hwpoison_page_add(ram_addr); |
268 |
kvm_mce_inject(env, paddr, code); |
269 |
} else {
|
270 |
if (code == BUS_MCEERR_AO) {
|
271 |
return 0; |
272 |
} else if (code == BUS_MCEERR_AR) { |
273 |
hardware_memory_error(); |
274 |
} else {
|
275 |
return 1; |
276 |
} |
277 |
} |
278 |
return 0; |
279 |
} |
280 |
|
281 |
int kvm_arch_on_sigbus(int code, void *addr) |
282 |
{ |
283 |
if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
|
284 |
ram_addr_t ram_addr; |
285 |
target_phys_addr_t paddr; |
286 |
|
287 |
/* Hope we are lucky for AO MCE */
|
288 |
if (qemu_ram_addr_from_host(addr, &ram_addr) ||
|
289 |
!kvm_physical_memory_addr_from_host(first_cpu->kvm_state, addr, |
290 |
&paddr)) { |
291 |
fprintf(stderr, "Hardware memory error for memory used by "
|
292 |
"QEMU itself instead of guest system!: %p\n", addr);
|
293 |
return 0; |
294 |
} |
295 |
kvm_hwpoison_page_add(ram_addr); |
296 |
kvm_mce_inject(first_cpu, paddr, code); |
297 |
} else {
|
298 |
if (code == BUS_MCEERR_AO) {
|
299 |
return 0; |
300 |
} else if (code == BUS_MCEERR_AR) { |
301 |
hardware_memory_error(); |
302 |
} else {
|
303 |
return 1; |
304 |
} |
305 |
} |
306 |
return 0; |
307 |
} |
308 |
|
309 |
static int kvm_inject_mce_oldstyle(CPUState *env) |
310 |
{ |
311 |
if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
|
312 |
unsigned int bank, bank_num = env->mcg_cap & 0xff; |
313 |
struct kvm_x86_mce mce;
|
314 |
|
315 |
env->exception_injected = -1;
|
316 |
|
317 |
/*
|
318 |
* There must be at least one bank in use if an MCE is pending.
|
319 |
* Find it and use its values for the event injection.
|
320 |
*/
|
321 |
for (bank = 0; bank < bank_num; bank++) { |
322 |
if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) { |
323 |
break;
|
324 |
} |
325 |
} |
326 |
assert(bank < bank_num); |
327 |
|
328 |
mce.bank = bank; |
329 |
mce.status = env->mce_banks[bank * 4 + 1]; |
330 |
mce.mcg_status = env->mcg_status; |
331 |
mce.addr = env->mce_banks[bank * 4 + 2]; |
332 |
mce.misc = env->mce_banks[bank * 4 + 3]; |
333 |
|
334 |
return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, &mce);
|
335 |
} |
336 |
return 0; |
337 |
} |
338 |
|
339 |
static void cpu_update_state(void *opaque, int running, RunState state) |
340 |
{ |
341 |
CPUState *env = opaque; |
342 |
|
343 |
if (running) {
|
344 |
env->tsc_valid = false;
|
345 |
} |
346 |
} |
347 |
|
348 |
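/*
 * Build the vcpu's CPUID table (paravirt leaves first, then the standard,
 * extended and Centaur ranges), then set up MCE capabilities and the TSC
 * frequency before handing everything to the kernel.
 */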
int kvm_arch_init_vcpu(CPUState *env)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[100];
    } QEMU_PACKED cpuid_data;
    KVMState *s = env->kvm_state;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    int r;

    env->cpuid_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);

    i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
    env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX);
    env->cpuid_ext_features |= i;

    env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(s, 0x80000001,
                                                             0, R_EDX);
    env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(s, 0x80000001,
                                                             0, R_ECX);
    env->cpuid_svm_features &= kvm_arch_get_supported_cpuid(s, 0x8000000A,
                                                            0, R_EDX);

    cpuid_i = 0;

    /* Paravirtualization CPUIDs */
    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_SIGNATURE;
    if (!hyperv_enabled()) {
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
        c->eax = 0;
    } else {
        memcpy(signature, "Microsoft Hv", 12);
        c->eax = HYPERV_CPUID_MIN;
    }
    c->ebx = signature[0];
    c->ecx = signature[1];
    c->edx = signature[2];

    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_FEATURES;
    c->eax = env->cpuid_kvm_features &
        kvm_arch_get_supported_cpuid(s, KVM_CPUID_FEATURES, 0, R_EAX);

    if (hyperv_enabled()) {
        memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
        c->eax = signature[0];

        c = &cpuid_data.entries[cpuid_i++];
        memset(c, 0, sizeof(*c));
        c->function = HYPERV_CPUID_VERSION;
        c->eax = 0x00001bbc;
        c->ebx = 0x00060001;

        c = &cpuid_data.entries[cpuid_i++];
        memset(c, 0, sizeof(*c));
        c->function = HYPERV_CPUID_FEATURES;
        if (hyperv_relaxed_timing_enabled()) {
            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
        }
        if (hyperv_vapic_recommended()) {
            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
            c->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
        }

        c = &cpuid_data.entries[cpuid_i++];
        memset(c, 0, sizeof(*c));
        c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
        if (hyperv_relaxed_timing_enabled()) {
            c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
        }
        if (hyperv_vapic_recommended()) {
            c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
        }
        c->ebx = hyperv_get_spinlock_retries();

        c = &cpuid_data.entries[cpuid_i++];
        memset(c, 0, sizeof(*c));
        c->function = HYPERV_CPUID_IMPLEMENT_LIMITS;
        c->eax = 0x40;
        c->ebx = 0x40;

        c = &cpuid_data.entries[cpuid_i++];
        memset(c, 0, sizeof(*c));
        c->function = KVM_CPUID_SIGNATURE_NEXT;
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
        c->eax = 0;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];
    }

    has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }
    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    /* Call Centaur's CPUID instructions if they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        env->cpuid_ext4_features &=
            kvm_arch_get_supported_cpuid(s, 0xC0000001, 0, R_EDX);
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            c = &cpuid_data.entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    cpuid_data.cpuid.nent = cpuid_i;

    if (((env->cpuid_version >> 8) & 0xF) >= 6
        && (env->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks > MCE_BANKS_DEF) {
            banks = MCE_BANKS_DEF;
        }
        mcg_cap &= MCE_CAP_DEF;
        mcg_cap |= banks;
        ret = kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, &mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }

        env->mcg_cap = mcg_cap;
    }

    qemu_add_vm_change_state_handler(cpu_update_state, env);

    r = kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
    if (r) {
        return r;
    }

    r = kvm_check_extension(env->kvm_state, KVM_CAP_TSC_CONTROL);
    if (r && env->tsc_khz) {
        r = kvm_vcpu_ioctl(env, KVM_SET_TSC_KHZ, env->tsc_khz);
        if (r < 0) {
            fprintf(stderr, "KVM_SET_TSC_KHZ failed\n");
            return r;
        }
    }

    if (kvm_has_xsave()) {
        env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
    }

    return 0;
}

void kvm_arch_reset_vcpu(CPUState *env)
{
    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->xcr0 = 1;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(env) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }
}

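/*
 * Probe the kernel's MSR index list once and record which optional MSRs
 * (STAR, VM_HSAVE_PA, TSC_DEADLINE, MISC_ENABLE) it can save and restore.
 */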
static int kvm_get_supported_msrs(KVMState *s)
{
    static int kvm_supported_msrs;
    int ret = 0;

    /* first time */
    if (kvm_supported_msrs == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        kvm_supported_msrs = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return ret;
        }
        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
                                     msr_list.nmsrs *
                                     sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
                    has_msr_hsave_pa = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
                    has_msr_tsc_deadline = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
                    has_msr_misc_enable = true;
                    continue;
                }
            }
        }

        g_free(kvm_msr_list);
    }

    return ret;
}

int kvm_arch_init(KVMState *s)
{
    QemuOptsList *list = qemu_find_opts("machine");
    uint64_t identity_base = 0xfffbc000;
    uint64_t shadow_mem;
    int ret;
    struct utsname utsname;

    ret = kvm_get_supported_msrs(s);
    if (ret < 0) {
        return ret;
    }

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    /*
     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
     * In order to use vm86 mode, an EPT identity map and a TSS are needed.
     * Since these must be part of guest physical memory, we need to allocate
     * them, both by setting their start addresses in the kernel and by
     * creating a corresponding e820 entry. We need 4 pages before the BIOS.
     *
     * Older KVM versions may not support setting the identity map base. In
     * that case we need to stick with the default, i.e. a 256K maximum BIOS
     * size.
     */
    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        /* Allows up to 16M BIOSes. */
        identity_base = 0xfeffc000;

        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
        if (ret < 0) {
            return ret;
        }
    }

    /* Set TSS base one page after EPT identity map. */
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
    if (ret < 0) {
        return ret;
    }

    /* Tell fw_cfg to notify the BIOS to reserve the range. */
    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
    if (ret < 0) {
        fprintf(stderr, "e820_add_entry() table is full\n");
        return ret;
    }
    qemu_register_reset(kvm_unpoison_all, NULL);

    if (!QTAILQ_EMPTY(&list->head)) {
        shadow_mem = qemu_opt_get_size(QTAILQ_FIRST(&list->head),
                                       "kvm_shadow_mem", -1);
        if (shadow_mem != -1) {
            shadow_mem /= 4096;
            ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
            if (ret < 0) {
                return ret;
            }
        }
    }
    return 0;
}

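/*
 * Converters between QEMU's SegmentCache and the kernel's kvm_segment:
 * set_seg/get_seg unpack and repack the descriptor flag bits, while
 * set_v8086_seg supplies the fixed attributes that vm86 segments use.
 */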
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                 (rhs->present * DESC_P_MASK) |
                 (rhs->dpl << DESC_DPL_SHIFT) |
                 (rhs->db << DESC_B_SHIFT) |
                 (rhs->s * DESC_S_MASK) |
                 (rhs->l << DESC_L_SHIFT) |
                 (rhs->g * DESC_G_MASK) |
                 (rhs->avl * DESC_AVL_MASK);
}

static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}

static int kvm_getput_regs(CPUState *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
    }

    return ret;
}

static int kvm_put_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    fpu.last_opcode = env->fpop;
    fpu.last_ip = env->fpip;
    fpu.last_dp = env->fpdp;
    for (i = 0; i < 8; ++i) {
        fpu.ftwx |= (!env->fptags[i]) << i;
    }
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}

#define XSAVE_FCW_FSW     0
#define XSAVE_FTW_FOP     1
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144

static int kvm_put_xsave(CPUState *env)
{
    struct kvm_xsave* xsave = env->kvm_xsave_buf;
    uint16_t cwd, swd, twd;
    int i, r;

    if (!kvm_has_xsave()) {
        return kvm_put_fpu(env);
    }

    memset(xsave, 0, sizeof(struct kvm_xsave));
    twd = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        twd |= (!env->fptags[i]) << i;
    }
    xsave->region[XSAVE_FCW_FSW] = (uint32_t)(swd << 16) + cwd;
    xsave->region[XSAVE_FTW_FOP] = (uint32_t)(env->fpop << 16) + twd;
    memcpy(&xsave->region[XSAVE_CWD_RIP], &env->fpip, sizeof(env->fpip));
    memcpy(&xsave->region[XSAVE_CWD_RDP], &env->fpdp, sizeof(env->fpdp));
    memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
            sizeof env->fpregs);
    memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
            sizeof env->xmm_regs);
    xsave->region[XSAVE_MXCSR] = env->mxcsr;
    *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
    memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
            sizeof env->ymmh_regs);
    r = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
    return r;
}

static int kvm_put_xcrs(CPUState *env)
{
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs()) {
        return 0;
    }

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
}

static int kvm_put_sregs(CPUState *env)
{
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env->apic_state);
    sregs.apic_base = cpu_get_apic_base(env->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
}

static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}

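/*
 * Write the guest MSRs into the kernel.  'level' limits what is touched:
 * the TSC and the paravirtual MSRs have side effects or disturb guest
 * timing, so they are only written on reset or full state updates.
 */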
static int kvm_put_msrs(CPUState *env, int level)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    kvm_msr_entry_set(&msrs[n++], MSR_PAT, env->pat);
    if (has_msr_star) {
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
    }
    if (has_msr_tsc_deadline) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSCDEADLINE, env->tsc_deadline);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_MISC_ENABLE,
                          env->msr_ia32_misc_enable);
    }
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
        kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
        kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
        kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
    }
#endif
    if (level == KVM_PUT_FULL_STATE) {
        /*
         * KVM is yet unable to synchronize TSC values of multiple VCPUs on
         * writeback. Until this is fixed, we only write the offset to SMP
         * guests after migration, desynchronizing the VCPUs, but avoiding
         * huge jump-backs that would occur without any writeback at all.
         */
        if (smp_cpus == 1 || env->tsc != 0) {
            kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
        }
    }
    /*
     * The following paravirtual MSRs have side effects on the guest or are
     * too heavy for normal writeback. Limit them to reset or full state
     * updates.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
                          env->system_time_msr);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
        if (has_msr_async_pf_en) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_ASYNC_PF_EN,
                              env->async_pf_en_msr);
        }
        if (hyperv_hypercall_available()) {
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_GUEST_OS_ID, 0);
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_HYPERCALL, 0);
        }
        if (hyperv_vapic_recommended()) {
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_APIC_ASSIST_PAGE, 0);
        }
    }
    if (env->mcg_cap) {
        int i;

        kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
        kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }

    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
}

static int kvm_get_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    env->fpop = fpu.last_opcode;
    env->fpip = fpu.last_ip;
    env->fpdp = fpu.last_dp;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    }
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int kvm_get_xsave(CPUState *env)
{
    struct kvm_xsave* xsave = env->kvm_xsave_buf;
    int ret, i;
    uint16_t cwd, swd, twd;

    if (!kvm_has_xsave()) {
        return kvm_get_fpu(env);
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        return ret;
    }

    cwd = (uint16_t)xsave->region[XSAVE_FCW_FSW];
    swd = (uint16_t)(xsave->region[XSAVE_FCW_FSW] >> 16);
    twd = (uint16_t)xsave->region[XSAVE_FTW_FOP];
    env->fpop = (uint16_t)(xsave->region[XSAVE_FTW_FOP] >> 16);
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((twd >> i) & 1);
    }
    memcpy(&env->fpip, &xsave->region[XSAVE_CWD_RIP], sizeof(env->fpip));
    memcpy(&env->fpdp, &xsave->region[XSAVE_CWD_RDP], sizeof(env->fpdp));
    env->mxcsr = xsave->region[XSAVE_MXCSR];
    memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
            sizeof env->fpregs);
    memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
            sizeof env->xmm_regs);
    env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
    memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
            sizeof env->ymmh_regs);
    return 0;
}

static int kvm_get_xcrs(CPUState *env)
{
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[0].xcr == 0) {
            env->xcr0 = xcrs.xcrs[0].value;
            break;
        }
    }
    return 0;
}

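/*
 * Read the special registers back from the kernel and recompute the
 * cached hflags (CPL, long mode, segment size flags, etc.) from them.
 */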
static int kvm_get_sregs(CPUState *env)
{
    struct kvm_sregs sregs;
    uint32_t hflags;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    env->efer = sregs.efer;

    /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */

#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
                (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}

1201 |
{ |
1202 |
struct {
|
1203 |
struct kvm_msrs info;
|
1204 |
struct kvm_msr_entry entries[100]; |
1205 |
} msr_data; |
1206 |
struct kvm_msr_entry *msrs = msr_data.entries;
|
1207 |
int ret, i, n;
|
1208 |
|
1209 |
n = 0;
|
1210 |
msrs[n++].index = MSR_IA32_SYSENTER_CS; |
1211 |
msrs[n++].index = MSR_IA32_SYSENTER_ESP; |
1212 |
msrs[n++].index = MSR_IA32_SYSENTER_EIP; |
1213 |
msrs[n++].index = MSR_PAT; |
1214 |
if (has_msr_star) {
|
1215 |
msrs[n++].index = MSR_STAR; |
1216 |
} |
1217 |
if (has_msr_hsave_pa) {
|
1218 |
msrs[n++].index = MSR_VM_HSAVE_PA; |
1219 |
} |
1220 |
if (has_msr_tsc_deadline) {
|
1221 |
msrs[n++].index = MSR_IA32_TSCDEADLINE; |
1222 |
} |
1223 |
if (has_msr_misc_enable) {
|
1224 |
msrs[n++].index = MSR_IA32_MISC_ENABLE; |
1225 |
} |
1226 |
|
1227 |
if (!env->tsc_valid) {
|
1228 |
msrs[n++].index = MSR_IA32_TSC; |
1229 |
env->tsc_valid = !runstate_is_running(); |
1230 |
} |
1231 |
|
1232 |
#ifdef TARGET_X86_64
|
1233 |
if (lm_capable_kernel) {
|
1234 |
msrs[n++].index = MSR_CSTAR; |
1235 |
msrs[n++].index = MSR_KERNELGSBASE; |
1236 |
msrs[n++].index = MSR_FMASK; |
1237 |
msrs[n++].index = MSR_LSTAR; |
1238 |
} |
1239 |
#endif
|
1240 |
msrs[n++].index = MSR_KVM_SYSTEM_TIME; |
1241 |
msrs[n++].index = MSR_KVM_WALL_CLOCK; |
1242 |
if (has_msr_async_pf_en) {
|
1243 |
msrs[n++].index = MSR_KVM_ASYNC_PF_EN; |
1244 |
} |
1245 |
|
1246 |
if (env->mcg_cap) {
|
1247 |
msrs[n++].index = MSR_MCG_STATUS; |
1248 |
msrs[n++].index = MSR_MCG_CTL; |
1249 |
for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) { |
1250 |
msrs[n++].index = MSR_MC0_CTL + i; |
1251 |
} |
1252 |
} |
1253 |
|
1254 |
msr_data.info.nmsrs = n; |
1255 |
ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data); |
1256 |
if (ret < 0) { |
1257 |
return ret;
|
1258 |
} |
1259 |
|
1260 |
for (i = 0; i < ret; i++) { |
1261 |
switch (msrs[i].index) {
|
1262 |
case MSR_IA32_SYSENTER_CS:
|
1263 |
env->sysenter_cs = msrs[i].data; |
1264 |
break;
|
1265 |
case MSR_IA32_SYSENTER_ESP:
|
1266 |
env->sysenter_esp = msrs[i].data; |
1267 |
break;
|
1268 |
case MSR_IA32_SYSENTER_EIP:
|
1269 |
env->sysenter_eip = msrs[i].data; |
1270 |
break;
|
1271 |
case MSR_PAT:
|
1272 |
env->pat = msrs[i].data; |
1273 |
break;
|
1274 |
case MSR_STAR:
|
1275 |
env->star = msrs[i].data; |
1276 |
break;
|
1277 |
#ifdef TARGET_X86_64
|
1278 |
case MSR_CSTAR:
|
1279 |
env->cstar = msrs[i].data; |
1280 |
break;
|
1281 |
case MSR_KERNELGSBASE:
|
1282 |
env->kernelgsbase = msrs[i].data; |
1283 |
break;
|
1284 |
case MSR_FMASK:
|
1285 |
env->fmask = msrs[i].data; |
1286 |
break;
|
1287 |
case MSR_LSTAR:
|
1288 |
env->lstar = msrs[i].data; |
1289 |
break;
|
1290 |
#endif
|
1291 |
case MSR_IA32_TSC:
|
1292 |
env->tsc = msrs[i].data; |
1293 |
break;
|
1294 |
case MSR_IA32_TSCDEADLINE:
|
1295 |
env->tsc_deadline = msrs[i].data; |
1296 |
break;
|
1297 |
case MSR_VM_HSAVE_PA:
|
1298 |
env->vm_hsave = msrs[i].data; |
1299 |
break;
|
1300 |
case MSR_KVM_SYSTEM_TIME:
|
1301 |
env->system_time_msr = msrs[i].data; |
1302 |
break;
|
1303 |
case MSR_KVM_WALL_CLOCK:
|
1304 |
env->wall_clock_msr = msrs[i].data; |
1305 |
break;
|
1306 |
case MSR_MCG_STATUS:
|
1307 |
env->mcg_status = msrs[i].data; |
1308 |
break;
|
1309 |
case MSR_MCG_CTL:
|
1310 |
env->mcg_ctl = msrs[i].data; |
1311 |
break;
|
1312 |
case MSR_IA32_MISC_ENABLE:
|
1313 |
env->msr_ia32_misc_enable = msrs[i].data; |
1314 |
break;
|
1315 |
default:
|
1316 |
if (msrs[i].index >= MSR_MC0_CTL &&
|
1317 |
msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) { |
1318 |
env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data; |
1319 |
} |
1320 |
break;
|
1321 |
case MSR_KVM_ASYNC_PF_EN:
|
1322 |
env->async_pf_en_msr = msrs[i].data; |
1323 |
break;
|
1324 |
} |
1325 |
} |
1326 |
|
1327 |
return 0; |
1328 |
} |
1329 |
|
1330 |
static int kvm_put_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };

    return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
}

static int kvm_get_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        env->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}

static int kvm_get_apic(CPUState *env)
{
    DeviceState *apic = env->apic_state;
    struct kvm_lapic_state kapic;
    int ret;

    if (apic && kvm_irqchip_in_kernel()) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_LAPIC, &kapic);
        if (ret < 0) {
            return ret;
        }

        kvm_get_apic_state(apic, &kapic);
    }
    return 0;
}

static int kvm_put_apic(CPUState *env)
{
    DeviceState *apic = env->apic_state;
    struct kvm_lapic_state kapic;

    if (apic && kvm_irqchip_in_kernel()) {
        kvm_put_apic_state(apic, &kapic);

        return kvm_vcpu_ioctl(env, KVM_SET_LAPIC, &kapic);
    }
    return 0;
}

static int kvm_put_vcpu_events(CPUState *env, int level)
{
    struct kvm_vcpu_events events;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);

    events.sipi_vector = env->sipi_vector;

    events.flags = 0;
    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |=
            KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
    }

    return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
}

static int kvm_get_vcpu_events(CPUState *env)
{
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }
    env->exception_injected =
        events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    env->sipi_vector = events.sipi_vector;

    return 0;
}

static int kvm_guest_debug_workarounds(CPUState *env)
{
    int ret = 0;
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have
     * to reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && env->singlestep_enabled)) {
        ret = kvm_update_guest_debug(env, reinject_trap);
    }
    return ret;
}

static int kvm_put_debugregs(CPUState *env)
{
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs);
}

static int kvm_get_debugregs(CPUState *env)
{
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}

int kvm_arch_put_registers(CPUState *env, int level)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));

    ret = kvm_getput_regs(env, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(env);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(env, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(env);
        if (ret < 0) {
            return ret;
        }
        ret = kvm_put_apic(env);
        if (ret < 0) {
            return ret;
        }
    }
    ret = kvm_put_vcpu_events(env, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(env);
    if (ret < 0) {
        return ret;
    }
    /* must be last */
    ret = kvm_guest_debug_workarounds(env);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *env)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));

    ret = kvm_getput_regs(env, 0);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xsave(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xcrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_sregs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_msrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_mp_state(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_apic(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_vcpu_events(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_debugregs(env);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

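/*
 * Runs on the vcpu thread right before entering the guest: injects a
 * pending NMI and, without an in-kernel irqchip, delivers PIC interrupts
 * or requests an interrupt-window exit while the guest cannot take them.
 */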
void kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    int ret;

    /* Inject NMI */
    if (env->interrupt_request & CPU_INTERRUPT_NMI) {
        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
        DPRINTF("injected NMI\n");
        ret = kvm_vcpu_ioctl(env, KVM_NMI);
        if (ret < 0) {
            fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                    strerror(-ret));
        }
    }

    if (!kvm_irqchip_in_kernel()) {
        /* Force the VCPU out of its inner loop to process any INIT requests
         * or pending TPR access reports. */
        if (env->interrupt_request &
            (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
            env->exit_request = 1;
        }

        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (env->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /* If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit.  This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts. */
        if ((env->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(env->apic_state);
    }
}

void kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }
    cpu_set_apic_tpr(env->apic_state, run->cr8);
    cpu_set_apic_base(env->apic_state, run->apic_base);
}

1693 |
int kvm_arch_process_async_events(CPUState *env)
|
1694 |
{ |
1695 |
if (env->interrupt_request & CPU_INTERRUPT_MCE) {
|
1696 |
/* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
|
1697 |
assert(env->mcg_cap); |
1698 |
|
1699 |
env->interrupt_request &= ~CPU_INTERRUPT_MCE; |
1700 |
|
1701 |
kvm_cpu_synchronize_state(env); |
1702 |
|
1703 |
if (env->exception_injected == EXCP08_DBLE) {
|
1704 |
/* this means triple fault */
|
1705 |
qemu_system_reset_request(); |
1706 |
env->exit_request = 1;
|
1707 |
return 0; |
1708 |
} |
1709 |
env->exception_injected = EXCP12_MCHK; |
1710 |
env->has_error_code = 0;
|
1711 |
|
1712 |
env->halted = 0;
|
1713 |
if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
|
1714 |
env->mp_state = KVM_MP_STATE_RUNNABLE; |
1715 |
} |
1716 |
} |
1717 |
|
1718 |
if (kvm_irqchip_in_kernel()) {
|
1719 |
return 0; |
1720 |
} |
1721 |
|
1722 |
if (((env->interrupt_request & CPU_INTERRUPT_HARD) &&
|
1723 |
(env->eflags & IF_MASK)) || |
1724 |
(env->interrupt_request & CPU_INTERRUPT_NMI)) { |
1725 |
env->halted = 0;
|
1726 |
} |
1727 |
if (env->interrupt_request & CPU_INTERRUPT_INIT) {
|
1728 |
kvm_cpu_synchronize_state(env); |
1729 |
do_cpu_init(env); |
1730 |
} |
1731 |
if (env->interrupt_request & CPU_INTERRUPT_SIPI) {
|
1732 |
kvm_cpu_synchronize_state(env); |
1733 |
do_cpu_sipi(env); |
1734 |
} |
1735 |
if (env->interrupt_request & CPU_INTERRUPT_TPR) {
|
1736 |
env->interrupt_request &= ~CPU_INTERRUPT_TPR; |
1737 |
kvm_cpu_synchronize_state(env); |
1738 |
apic_handle_tpr_access_report(env->apic_state, env->eip, |
1739 |
env->tpr_access_type); |
1740 |
} |
1741 |
|
1742 |
return env->halted;
|
1743 |
} |
1744 |
|
1745 |
static int kvm_handle_halt(CPUState *env) |
1746 |
{ |
1747 |
if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
|
1748 |
(env->eflags & IF_MASK)) && |
1749 |
!(env->interrupt_request & CPU_INTERRUPT_NMI)) { |
1750 |
env->halted = 1;
|
1751 |
return EXCP_HLT;
|
1752 |
} |
1753 |
|
1754 |
return 0; |
1755 |
} |
1756 |
|
1757 |
static int kvm_handle_tpr_access(CPUState *env) |
1758 |
{ |
1759 |
struct kvm_run *run = env->kvm_run;
|
1760 |
|
1761 |
apic_handle_tpr_access_report(env->apic_state, run->tpr_access.rip, |
1762 |
run->tpr_access.is_write ? TPR_ACCESS_WRITE |
1763 |
: TPR_ACCESS_READ); |
1764 |
return 1; |
1765 |
} |
1766 |
|
1767 |
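/*
 * gdbstub support: software breakpoints patch an int3 (0xcc) into guest
 * memory; up to four hardware breakpoints/watchpoints are tracked in
 * hw_breakpoint[] and programmed into DR0-DR7 by
 * kvm_arch_update_guest_debug() below.
 */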
int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;

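/*
 * Decode a KVM_EXIT_DEBUG exit: DR6 bit 14 (BS) signals a single-step
 * trap, DR6 bits 0-3 a hardware breakpoint hit, with the matching DR7
 * R/W field telling execution breakpoints and watchpoints apart.  Debug
 * exceptions not caused by the debugger are reinjected into the guest.
 */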
static int kvm_handle_debug(struct kvm_debug_exit_arch *arch_info)
{
    int ret = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cpu_single_env->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cpu_single_env);
        assert(cpu_single_env->exception_injected == -1);

        /* pass to guest */
        cpu_single_env->exception_injected = arch_info->exception;
        cpu_single_env->has_error_code = 0;
    }

    return ret;
}

void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(env)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}

static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}

#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        ret = kvm_handle_halt(env);
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_TPR_ACCESS:
        ret = kvm_handle_tpr_access(env);
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure can be most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode\n"
                    "which is not supported on less recent Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        ret = kvm_handle_debug(&run->debug.arch);
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *env)
{
    kvm_cpu_synchronize_state(env);
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        /* If kernel can't do irq routing, interrupt source
         * override 0->2 cannot be set up as required by HPET.
         * So we have to disable it.
         */
        no_hpet = 1;
    }
}