root / target-i386 / kvm.c @ c5999bfc

/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/utsname.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "kvm.h"
#include "cpu.h"
#include "gdbstub.h"
#include "host-utils.h"
#include "hw/pc.h"
#include "hw/apic.h"
#include "ioport.h"
#include "kvm_x86.h"

#ifdef CONFIG_KVM_PARA
#include <linux/kvm_para.h>
#endif

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
#if defined(CONFIG_KVM_PARA) && defined(KVM_CAP_ASYNC_PF)
static bool has_msr_async_pf_en;
#endif
static int lm_capable_kernel;

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = (struct kvm_cpuid2 *)qemu_mallocz(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            qemu_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}
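
/*
 * Return the KVM-supported bits of one CPUID register for the given
 * function/index. try_get_cpuid() cannot know the required number of
 * entries in advance, so the buffer is doubled until the ioctl fits.
 */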
uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    int i, max;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;

    max = 1;
    while ((cpuid = try_get_cpuid(env->kvm_state, max)) == NULL) {
        max *= 2;
    }

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            switch (reg) {
            case R_EAX:
                ret = cpuid->entries[i].eax;
                break;
            case R_EBX:
                ret = cpuid->entries[i].ebx;
                break;
            case R_ECX:
                ret = cpuid->entries[i].ecx;
                break;
            case R_EDX:
                ret = cpuid->entries[i].edx;
                switch (function) {
                case 1:
                    /* KVM before 2.6.30 misreports the following features */
                    ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
                    break;
                case 0x80000001:
                    /* On Intel, kvm returns cpuid according to the Intel spec,
                     * so add missing bits according to the AMD spec:
                     */
                    cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);
                    ret |= cpuid_1_edx & 0x183f7ff;
                    break;
                }
                break;
            }
        }
    }

    qemu_free(cpuid);

    return ret;
}
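
/*
 * Table mapping optional KVM capabilities to the paravirtual feature
 * bits advertised to the guest in the KVM_CPUID_FEATURES leaf.
 */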
#ifdef CONFIG_KVM_PARA
struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
#ifdef KVM_CAP_ASYNC_PF
    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
#endif
    { -1, -1 }
};

static int get_para_features(CPUState *env)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
        if (kvm_check_extension(env->kvm_state, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }
#ifdef KVM_CAP_ASYNC_PF
    has_msr_async_pf_en = features & (1 << KVM_FEATURE_ASYNC_PF);
#endif
    return features;
}
#endif
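
/*
 * Machine-check (MCE) plumbing: capability probing, the KVM_X86_* setup
 * and injection ioctls, and helpers that route an injection to the
 * target VCPU via run_on_cpu().
 */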
#ifdef KVM_CAP_MCE
static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}

static int kvm_setup_mce(CPUState *env, uint64_t *mcg_cap)
{
    return kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, mcg_cap);
}

static int kvm_set_mce(CPUState *env, struct kvm_x86_mce *m)
{
    return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, m);
}

static int kvm_get_msr(CPUState *env, struct kvm_msr_entry *msrs, int n)
{
    struct kvm_msrs *kmsrs = qemu_malloc(sizeof *kmsrs + n * sizeof *msrs);
    int r;

    kmsrs->nmsrs = n;
    memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
    r = kvm_vcpu_ioctl(env, KVM_GET_MSRS, kmsrs);
    memcpy(msrs, kmsrs->entries, n * sizeof *msrs);
    free(kmsrs);
    return r;
}

/* FIXME: kill this and kvm_get_msr, use env->mcg_status instead */
static int kvm_mce_in_progress(CPUState *env)
{
    struct kvm_msr_entry msr_mcg_status = {
        .index = MSR_MCG_STATUS,
    };
    int r;

    r = kvm_get_msr(env, &msr_mcg_status, 1);
    if (r == -1 || r == 0) {
        fprintf(stderr, "Failed to get MCE status\n");
        return 0;
    }
    return !!(msr_mcg_status.data & MCG_STATUS_MCIP);
}

struct kvm_x86_mce_data
{
    CPUState *env;
    struct kvm_x86_mce *mce;
    int abort_on_error;
};

static void kvm_do_inject_x86_mce(void *_data)
{
    struct kvm_x86_mce_data *data = _data;
    int r;

    /* If there is an MCE exception being processed, ignore this SRAO MCE */
    if ((data->env->mcg_cap & MCG_SER_P) &&
        !(data->mce->status & MCI_STATUS_AR)) {
        if (kvm_mce_in_progress(data->env)) {
            return;
        }
    }

    r = kvm_set_mce(data->env, data->mce);
    if (r < 0) {
        perror("kvm_set_mce FAILED");
        if (data->abort_on_error) {
            abort();
        }
    }
}

static void kvm_inject_x86_mce_on(CPUState *env, struct kvm_x86_mce *mce,
                                  int flag)
{
    struct kvm_x86_mce_data data = {
        .env = env,
        .mce = mce,
        .abort_on_error = (flag & ABORT_ON_ERROR),
    };

    if (!env->mcg_cap) {
        fprintf(stderr, "MCE support is not enabled!\n");
        return;
    }

    run_on_cpu(env, kvm_do_inject_x86_mce, &data);
}

static void kvm_mce_broadcast_rest(CPUState *env);
#endif

void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc,
                        int flag)
{
#ifdef KVM_CAP_MCE
    struct kvm_x86_mce mce = {
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
    };

    if (flag & MCE_BROADCAST) {
        kvm_mce_broadcast_rest(cenv);
    }

    kvm_inject_x86_mce_on(cenv, &mce, flag);
#else
    if (flag & ABORT_ON_ERROR) {
        abort();
    }
#endif
}
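
/*
 * Build the guest CPUID table: the paravirt leaves, the stateful leaf 2,
 * the index-significant leaves 4/0xb/0xd, and the extended 0x8000xxxx
 * range; optionally enable MCE emulation; then install the result via
 * KVM_SET_CPUID2.
 */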
int kvm_arch_init_vcpu(CPUState *env)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[100];
    } __attribute__((packed)) cpuid_data;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
#ifdef CONFIG_KVM_PARA
    uint32_t signature[3];
#endif

    env->cpuid_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);

    i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
    env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_ECX);
    env->cpuid_ext_features |= i;

    env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
                                                             0, R_EDX);
    env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
                                                             0, R_ECX);
    env->cpuid_svm_features &= kvm_arch_get_supported_cpuid(env, 0x8000000A,
                                                            0, R_EDX);

    cpuid_i = 0;

#ifdef CONFIG_KVM_PARA
    /* Paravirtualization CPUIDs */
    memcpy(signature, "KVMKVMKVM\0\0\0", 12);
    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_SIGNATURE;
    c->eax = 0;
    c->ebx = signature[0];
    c->ecx = signature[1];
    c->edx = signature[2];

    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_FEATURES;
    c->eax = env->cpuid_kvm_features & get_para_features(env);
#endif

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    break;
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }
    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    cpuid_data.cpuid.nent = cpuid_i;

#ifdef KVM_CAP_MCE
    if (((env->cpuid_version >> 8) & 0xF) >= 6
        && (env->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap;
        int banks;

        if (kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks)) {
            perror("kvm_get_mce_cap_supported FAILED");
        } else {
            if (banks > MCE_BANKS_DEF) {
                banks = MCE_BANKS_DEF;
            }
            mcg_cap &= MCE_CAP_DEF;
            mcg_cap |= banks;
            if (kvm_setup_mce(env, &mcg_cap)) {
                perror("kvm_setup_mce FAILED");
            } else {
                env->mcg_cap = mcg_cap;
            }
        }
    }
#endif

    return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}

void kvm_arch_reset_vcpu(CPUState *env)
{
    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->xcr0 = 1;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(env) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }
}
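
/*
 * Probe KVM_GET_MSR_INDEX_LIST once to learn whether the kernel can
 * save/restore the optional MSR_STAR and MSR_VM_HSAVE_PA MSRs.
 */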
static int kvm_get_supported_msrs(KVMState *s)
{
    static int kvm_supported_msrs;
    int ret = 0;

    /* first time */
    if (kvm_supported_msrs == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        kvm_supported_msrs = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return ret;
        }
        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = qemu_mallocz(MAX(1024, sizeof(msr_list) +
                                              msr_list.nmsrs *
                                              sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
                    has_msr_hsave_pa = true;
                    continue;
                }
            }
        }

        free(kvm_msr_list);
    }

    return ret;
}

int kvm_arch_init(KVMState *s)
{
    uint64_t identity_base = 0xfffbc000;
    int ret;
    struct utsname utsname;

    ret = kvm_get_supported_msrs(s);
    if (ret < 0) {
        return ret;
    }

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    /*
     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
     * In order to use vm86 mode, an EPT identity map and a TSS are needed.
     * Since these must be part of guest physical memory, we need to allocate
     * them, both by setting their start addresses in the kernel and by
     * creating a corresponding e820 entry. We need 4 pages before the BIOS.
     *
     * Older KVM versions may not support setting the identity map base. In
     * that case we need to stick with the default, i.e. a 256K maximum BIOS
     * size.
     */
#ifdef KVM_CAP_SET_IDENTITY_MAP_ADDR
    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        /* Allows up to 16M BIOSes. */
        identity_base = 0xfeffc000;

        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
        if (ret < 0) {
            return ret;
        }
    }
#endif
    /* Set TSS base one page after EPT identity map. */
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
    if (ret < 0) {
        return ret;
    }

    /* Tell fw_cfg to notify the BIOS to reserve the range. */
    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
    if (ret < 0) {
        fprintf(stderr, "e820_add_entry() table is full\n");
        return ret;
    }

    return 0;
}
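
/*
 * Segment conversion helpers: QEMU caches descriptor attributes in one
 * packed flags word, while KVM's struct kvm_segment keeps one field per
 * attribute. set_v8086_seg() forces the fixed attributes vm86 requires.
 */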
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                 (rhs->present * DESC_P_MASK) |
                 (rhs->dpl << DESC_DPL_SHIFT) |
                 (rhs->db << DESC_B_SHIFT) |
                 (rhs->s * DESC_S_MASK) |
                 (rhs->l << DESC_L_SHIFT) |
                 (rhs->g * DESC_G_MASK) |
                 (rhs->avl * DESC_AVL_MASK);
}
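
/*
 * One helper handles both transfer directions: 'set' selects
 * QEMU -> KVM (KVM_SET_REGS), otherwise KVM -> QEMU (KVM_GET_REGS).
 */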
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}

static int kvm_getput_regs(CPUState *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
    }

    return ret;
}

static int kvm_put_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    for (i = 0; i < 8; ++i) {
        fpu.ftwx |= (!env->fptags[i]) << i;
    }
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}
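
/*
 * The XSAVE_* values below are offsets in 32-bit words into
 * kvm_xsave.region[], matching the hardware XSAVE area layout:
 * the legacy FXSAVE image first, then the XSTATE_BV header field,
 * then the AVX high-YMM state.
 */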
#ifdef KVM_CAP_XSAVE
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144
#endif

static int kvm_put_xsave(CPUState *env)
{
#ifdef KVM_CAP_XSAVE
    int i, r;
    struct kvm_xsave* xsave;
    uint16_t cwd, swd, twd, fop;

    if (!kvm_has_xsave()) {
        return kvm_put_fpu(env);
    }

    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    memset(xsave, 0, sizeof(struct kvm_xsave));
    cwd = swd = twd = fop = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        twd |= (!env->fptags[i]) << i;
    }
    xsave->region[0] = (uint32_t)(swd << 16) + cwd;
    xsave->region[1] = (uint32_t)(fop << 16) + twd;
    memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
            sizeof env->fpregs);
    memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
            sizeof env->xmm_regs);
    xsave->region[XSAVE_MXCSR] = env->mxcsr;
    *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
    memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
            sizeof env->ymmh_regs);
    r = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
    qemu_free(xsave);
    return r;
#else
    return kvm_put_fpu(env);
#endif
}

static int kvm_put_xcrs(CPUState *env)
{
#ifdef KVM_CAP_XCRS
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs()) {
        return 0;
    }

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
#else
    return 0;
#endif
}

static int kvm_put_sregs(CPUState *env)
{
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env->apic_state);
    sregs.apic_base = cpu_get_apic_base(env->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
}
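
/*
 * MSR writeback. 'level' limits what is pushed: the TSC and the KVM
 * paravirtual MSRs are only written on reset or full (migration-time)
 * state updates, for the reasons given in the comments inside
 * kvm_put_msrs().
 */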
static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}

static int kvm_put_msrs(CPUState *env, int level)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (has_msr_star) {
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
    }
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
        kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
        kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
        kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
    }
#endif
    if (level == KVM_PUT_FULL_STATE) {
        /*
         * KVM is yet unable to synchronize TSC values of multiple VCPUs on
         * writeback. Until this is fixed, we only write the offset to SMP
         * guests after migration, desynchronizing the VCPUs, but avoiding
         * huge jump-backs that would occur without any writeback at all.
         */
        if (smp_cpus == 1 || env->tsc != 0) {
            kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
        }
    }
    /*
     * The following paravirtual MSRs have side effects on the guest or are
     * too heavy for normal writeback. Limit them to reset or full state
     * updates.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
                          env->system_time_msr);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
#if defined(CONFIG_KVM_PARA) && defined(KVM_CAP_ASYNC_PF)
        if (has_msr_async_pf_en) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_ASYNC_PF_EN,
                              env->async_pf_en_msr);
        }
#endif
    }
#ifdef KVM_CAP_MCE
    if (env->mcg_cap) {
        int i;

        if (level == KVM_PUT_RESET_STATE) {
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
        } else if (level == KVM_PUT_FULL_STATE) {
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
            for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
                kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i,
                                  env->mce_banks[i]);
            }
        }
    }
#endif

    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
}

static int kvm_get_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    }
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int kvm_get_xsave(CPUState *env)
{
#ifdef KVM_CAP_XSAVE
    struct kvm_xsave* xsave;
    int ret, i;
    uint16_t cwd, swd, twd, fop;

    if (!kvm_has_xsave()) {
        return kvm_get_fpu(env);
    }

    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        qemu_free(xsave);
        return ret;
    }

    cwd = (uint16_t)xsave->region[0];
    swd = (uint16_t)(xsave->region[0] >> 16);
    twd = (uint16_t)xsave->region[1];
    fop = (uint16_t)(xsave->region[1] >> 16);
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((twd >> i) & 1);
    }
    env->mxcsr = xsave->region[XSAVE_MXCSR];
    memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
            sizeof env->fpregs);
    memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
            sizeof env->xmm_regs);
    env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
    memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
            sizeof env->ymmh_regs);
    qemu_free(xsave);
    return 0;
#else
    return kvm_get_fpu(env);
#endif
}

static int kvm_get_xcrs(CPUState *env)
{
#ifdef KVM_CAP_XCRS
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[0].xcr == 0) {
            env->xcr0 = xcrs.xcrs[0].value;
            break;
        }
    }
    return 0;
#else
    return 0;
#endif
}

static int kvm_get_sregs(CPUState *env)
{
    struct kvm_sregs sregs;
    uint32_t hflags;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env->apic_state, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env->apic_state, sregs.cr8);
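
    /*
     * Recompute the hflags cache (CPL, CS/SS operand sizes, long mode,
     * address-size heuristics) from the segment and control-register
     * state that was just read back from KVM.
     */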
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
                (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}

static int kvm_get_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    if (has_msr_star) {
        msrs[n++].index = MSR_STAR;
    }
    if (has_msr_hsave_pa) {
        msrs[n++].index = MSR_VM_HSAVE_PA;
    }
    msrs[n++].index = MSR_IA32_TSC;
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        msrs[n++].index = MSR_CSTAR;
        msrs[n++].index = MSR_KERNELGSBASE;
        msrs[n++].index = MSR_FMASK;
        msrs[n++].index = MSR_LSTAR;
    }
#endif
    msrs[n++].index = MSR_KVM_SYSTEM_TIME;
    msrs[n++].index = MSR_KVM_WALL_CLOCK;
#if defined(CONFIG_KVM_PARA) && defined(KVM_CAP_ASYNC_PF)
    if (has_msr_async_pf_en) {
        msrs[n++].index = MSR_KVM_ASYNC_PF_EN;
    }
#endif

#ifdef KVM_CAP_MCE
    if (env->mcg_cap) {
        msrs[n++].index = MSR_MCG_STATUS;
        msrs[n++].index = MSR_MCG_CTL;
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            msrs[n++].index = MSR_MC0_CTL + i;
        }
    }
#endif

    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < ret; i++) {
        switch (msrs[i].index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
#ifdef KVM_CAP_MCE
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
#endif
        default:
#ifdef KVM_CAP_MCE
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
#endif
            break;
#if defined(CONFIG_KVM_PARA) && defined(KVM_CAP_ASYNC_PF)
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
#endif
        }
    }

    return 0;
}
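
/*
 * MP state sync. With an in-kernel irqchip the halted state is owned by
 * the kernel, so env->halted has to be derived from it on reads.
 */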
static int kvm_put_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };

    return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
}

static int kvm_get_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        env->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}

static int kvm_put_vcpu_events(CPUState *env, int level)
{
#ifdef KVM_CAP_VCPU_EVENTS
    struct kvm_vcpu_events events;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);

    events.sipi_vector = env->sipi_vector;

    events.flags = 0;
    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |=
            KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
    }

    return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
#else
    return 0;
#endif
}

static int kvm_get_vcpu_events(CPUState *env)
{
#ifdef KVM_CAP_VCPU_EVENTS
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }
    env->exception_injected =
        events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    env->sipi_vector = events.sipi_vector;
#endif

    return 0;
}

static int kvm_guest_debug_workarounds(CPUState *env)
{
    int ret = 0;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
     * reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && env->singlestep_enabled)) {
        ret = kvm_update_guest_debug(env, reinject_trap);
    }
#endif /* KVM_CAP_SET_GUEST_DEBUG */
    return ret;
}

static int kvm_put_debugregs(CPUState *env)
{
#ifdef KVM_CAP_DEBUGREGS
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs);
#else
    return 0;
#endif
}

static int kvm_get_debugregs(CPUState *env)
{
#ifdef KVM_CAP_DEBUGREGS
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;
#endif

    return 0;
}
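
/*
 * Full writeback/readback entry points. The order matters at least for
 * the guest-debug workaround, which must run last (see the comment in
 * kvm_arch_put_registers below).
 */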
int kvm_arch_put_registers(CPUState *env, int level)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_self(env));

    ret = kvm_getput_regs(env, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(env, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(env);
        if (ret < 0) {
            return ret;
        }
    }
    ret = kvm_put_vcpu_events(env, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(env);
    if (ret < 0) {
        return ret;
    }
    /* must be last */
    ret = kvm_guest_debug_workarounds(env);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *env)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_self(env));

    ret = kvm_getput_regs(env, 0);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xsave(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xcrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_sregs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_msrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_mp_state(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_vcpu_events(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_debugregs(env);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    /* Inject NMI */
    if (env->interrupt_request & CPU_INTERRUPT_NMI) {
        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
        DPRINTF("injected NMI\n");
        kvm_vcpu_ioctl(env, KVM_NMI);
    }

    /* Try to inject an interrupt if the guest can accept it */
    if (run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) {
        int irq;

        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            struct kvm_interrupt intr;
            intr.irq = irq;
            /* FIXME: errors */
            DPRINTF("injected interrupt %d\n", irq);
            kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
        }
    }

    /* If we have an interrupt but the guest is not ready to receive an
     * interrupt, request an interrupt window exit.  This will
     * cause a return to userspace as soon as the guest is ready to
     * receive interrupts. */
    if ((env->interrupt_request & CPU_INTERRUPT_HARD)) {
        run->request_interrupt_window = 1;
    } else {
        run->request_interrupt_window = 0;
    }

    DPRINTF("setting tpr\n");
    run->cr8 = cpu_get_apic_tpr(env->apic_state);

    return 0;
}

int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }
    cpu_set_apic_tpr(env->apic_state, run->cr8);
    cpu_set_apic_base(env->apic_state, run->apic_base);

    return 0;
}

int kvm_arch_process_irqchip_events(CPUState *env)
{
    if (env->interrupt_request & CPU_INTERRUPT_INIT) {
        kvm_cpu_synchronize_state(env);
        do_cpu_init(env);
        env->exception_index = EXCP_HALTED;
    }

    if (env->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(env);
        do_cpu_sipi(env);
    }

    return env->halted;
}

static int kvm_handle_halt(CPUState *env)
{
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
        return 0;
    }

    return 1;
}

static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}

#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    uint64_t code;
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        ret = kvm_handle_halt(env);
        break;
    case KVM_EXIT_SET_TPR:
        ret = 1;
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure is most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode\n"
                    "which is not supported on less recent Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;

int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
{
    int handle = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cpu_single_env->singlestep_enabled) {
                handle = 1;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        handle = 1;
                        break;
                    case 0x1:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc)) {
        handle = 1;
    }
    if (!handle) {
        cpu_synchronize_state(cpu_single_env);
        assert(cpu_single_env->exception_injected == -1);

        cpu_single_env->exception_injected = arch_info->exception;
        cpu_single_env->has_error_code = 0;
    }

    return handle;
}
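
/*
 * DR7 layout used below: bit (2 << n*2) globally enables slot n, bits
 * 16+n*4 encode the break type (type_code), and bits 18+n*4 the length
 * (len_code).
 */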
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(env)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
#endif /* KVM_CAP_SET_GUEST_DEBUG */

bool kvm_arch_stop_on_emulation_error(CPUState *env)
{
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}

static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}

#ifdef KVM_CAP_MCE
static void kvm_mce_broadcast_rest(CPUState *env)
{
    struct kvm_x86_mce mce = {
        .bank = 1,
        .status = MCI_STATUS_VAL | MCI_STATUS_UC,
        .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
        .addr = 0,
        .misc = 0,
    };
    CPUState *cenv;

    /* Broadcast MCA signal for processor version 06H_EH and above */
    if (cpu_x86_support_mca_broadcast(env)) {
        for (cenv = first_cpu; cenv != NULL; cenv = cenv->next_cpu) {
            if (cenv == env) {
                continue;
            }
            kvm_inject_x86_mce_on(cenv, &mce, ABORT_ON_ERROR);
        }
    }
}

static void kvm_mce_inj_srar_dataload(CPUState *env, target_phys_addr_t paddr)
{
    struct kvm_x86_mce mce = {
        .bank = 9,
        .status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
                  | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
                  | MCI_STATUS_AR | 0x134,
        .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV,
        .addr = paddr,
        .misc = (MCM_ADDR_PHYS << 6) | 0xc,
    };
    int r;

    r = kvm_set_mce(env, &mce);
    if (r < 0) {
        fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno));
        abort();
    }
    kvm_mce_broadcast_rest(env);
}

static void kvm_mce_inj_srao_memscrub(CPUState *env, target_phys_addr_t paddr)
{
    struct kvm_x86_mce mce = {
        .bank = 9,
        .status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
                  | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
                  | 0xc0,
        .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
        .addr = paddr,
        .misc = (MCM_ADDR_PHYS << 6) | 0xc,
    };
    int r;

    r = kvm_set_mce(env, &mce);
    if (r < 0) {
        fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno));
        abort();
    }
    kvm_mce_broadcast_rest(env);
}

static void kvm_mce_inj_srao_memscrub2(CPUState *env, target_phys_addr_t paddr)
{
    struct kvm_x86_mce mce = {
        .bank = 9,
        .status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
                  | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
                  | 0xc0,
        .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
        .addr = paddr,
        .misc = (MCM_ADDR_PHYS << 6) | 0xc,
    };

    kvm_inject_x86_mce_on(env, &mce, ABORT_ON_ERROR);
    kvm_mce_broadcast_rest(env);
}

#endif
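
/*
 * SIGBUS handling: translate host memory-failure signals on guest RAM
 * (BUS_MCEERR_AO: action optional, BUS_MCEERR_AR: action required)
 * into architectural MCE injections for the guest.
 */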
int kvm_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
#if defined(KVM_CAP_MCE)
    void *vaddr;
    ram_addr_t ram_addr;
    target_phys_addr_t paddr;

    if ((env->mcg_cap & MCG_SER_P) && addr
        && (code == BUS_MCEERR_AR
            || code == BUS_MCEERR_AO)) {
        vaddr = (void *)addr;
        if (qemu_ram_addr_from_host(vaddr, &ram_addr) ||
            !kvm_physical_memory_addr_from_ram(env->kvm_state, ram_addr,
                                               &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!\n");
            /* Hope we are lucky for AO MCE */
            if (code == BUS_MCEERR_AO) {
                return 0;
            } else {
                hardware_memory_error();
            }
        }

        if (code == BUS_MCEERR_AR) {
            /* Fake an Intel architectural Data Load SRAR UCR */
            kvm_mce_inj_srar_dataload(env, paddr);
        } else {
            /*
             * If there is an MCE exception being processed, ignore
             * this SRAO MCE
             */
            if (!kvm_mce_in_progress(env)) {
                /* Fake an Intel architectural Memory scrubbing UCR */
                kvm_mce_inj_srao_memscrub(env, paddr);
            }
        }
    } else
#endif
    {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}

int kvm_on_sigbus(int code, void *addr)
{
#if defined(KVM_CAP_MCE)
    if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
        void *vaddr;
        ram_addr_t ram_addr;
        target_phys_addr_t paddr;

        /* Hope we are lucky for AO MCE */
        vaddr = addr;
        if (qemu_ram_addr_from_host(vaddr, &ram_addr) ||
            !kvm_physical_memory_addr_from_ram(first_cpu->kvm_state,
                                               ram_addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!: %p\n", addr);
            return 0;
        }
        kvm_mce_inj_srao_memscrub2(first_cpu, paddr);
    } else
#endif
    {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}