/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "kvm.h"
#include "cpu.h"
#include "gdbstub.h"
#include "host-utils.h"
#include "hw/pc.h"
#include "hw/apic.h"
#include "ioport.h"
#include "kvm_x86.h"

#ifdef CONFIG_KVM_PARA
#include <linux/kvm_para.h>
#endif
//
//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif

#ifdef KVM_CAP_EXT_CPUID

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = (struct kvm_cpuid2 *)qemu_mallocz(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            qemu_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

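/* try_get_cpuid() returns NULL when the kernel reports (or, on older
 * kernels, silently performs) truncation, so the caller below doubles
 * 'max' until the whole supported-CPUID list fits in the buffer. */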
uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    int i, max;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;

    if (!kvm_check_extension(env->kvm_state, KVM_CAP_EXT_CPUID)) {
        return -1U;
    }

    max = 1;
    while ((cpuid = try_get_cpuid(env->kvm_state, max)) == NULL) {
        max *= 2;
    }

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            switch (reg) {
            case R_EAX:
                ret = cpuid->entries[i].eax;
                break;
            case R_EBX:
                ret = cpuid->entries[i].ebx;
                break;
            case R_ECX:
                ret = cpuid->entries[i].ecx;
                break;
            case R_EDX:
                ret = cpuid->entries[i].edx;
                switch (function) {
                case 1:
                    /* KVM before 2.6.30 misreports the following features */
                    ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
                    break;
                case 0x80000001:
                    /* On Intel, kvm returns cpuid according to the Intel spec,
                     * so add missing bits according to the AMD spec:
                     */
                    cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);
                    ret |= cpuid_1_edx & 0x183f7ff;
                    break;
                }
                break;
            }
        }
    }

    qemu_free(cpuid);

    return ret;
}

#else

uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
                                      uint32_t index, int reg)
{
    return -1U;
}

#endif

#ifdef CONFIG_KVM_PARA
struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
#ifdef KVM_CAP_CLOCKSOURCE
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
#endif
#ifdef KVM_CAP_NOP_IO_DELAY
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
#endif
#ifdef KVM_CAP_PV_MMU
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
#endif
    { -1, -1 }
};

static int get_para_features(CPUState *env)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
        if (kvm_check_extension(env->kvm_state, para_features[i].cap))
            features |= (1 << para_features[i].feature);
    }

    return features;
}
#endif

#ifdef KVM_CAP_MCE
static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}

static int kvm_setup_mce(CPUState *env, uint64_t *mcg_cap)
{
    return kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, mcg_cap);
}

static int kvm_set_mce(CPUState *env, struct kvm_x86_mce *m)
{
    return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, m);
}

static int kvm_get_msr(CPUState *env, struct kvm_msr_entry *msrs, int n)
{
    struct kvm_msrs *kmsrs = qemu_malloc(sizeof *kmsrs + n * sizeof *msrs);
    int r;

    kmsrs->nmsrs = n;
    memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
    r = kvm_vcpu_ioctl(env, KVM_GET_MSRS, kmsrs);
    memcpy(msrs, kmsrs->entries, n * sizeof *msrs);
    free(kmsrs);
    return r;
}

/* FIXME: kill this and kvm_get_msr, use env->mcg_status instead */
static int kvm_mce_in_exception(CPUState *env)
{
    struct kvm_msr_entry msr_mcg_status = {
        .index = MSR_MCG_STATUS,
    };
    int r;

    r = kvm_get_msr(env, &msr_mcg_status, 1);
    if (r == -1 || r == 0) {
        return -1;
    }
    return !!(msr_mcg_status.data & MCG_STATUS_MCIP);
}

struct kvm_x86_mce_data
{
    CPUState *env;
    struct kvm_x86_mce *mce;
    int abort_on_error;
};

static void kvm_do_inject_x86_mce(void *_data)
{
    struct kvm_x86_mce_data *data = _data;
    int r;

    /* If there is an MCE exception being processed, ignore this SRAO MCE */
    r = kvm_mce_in_exception(data->env);
    if (r == -1)
        fprintf(stderr, "Failed to get MCE status\n");
    else if (r && !(data->mce->status & MCI_STATUS_AR))
        return;

    r = kvm_set_mce(data->env, data->mce);
    if (r < 0) {
        perror("kvm_set_mce FAILED");
        if (data->abort_on_error) {
            abort();
        }
    }
}
#endif

void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc,
                        int abort_on_error)
{
#ifdef KVM_CAP_MCE
    struct kvm_x86_mce mce = {
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
    };
    struct kvm_x86_mce_data data = {
        .env = cenv,
        .mce = &mce,
    };

    if (!cenv->mcg_cap) {
        fprintf(stderr, "MCE support is not enabled!\n");
        return;
    }

    run_on_cpu(cenv, kvm_do_inject_x86_mce, &data);
#else
    if (abort_on_error)
        abort();
#endif
}

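/* Build the guest CPUID table: paravirt signature leaves first (when
 * CONFIG_KVM_PARA), then the standard and extended ranges as reported by
 * cpu_x86_cpuid(), set up MCE emulation if available, and hand the result
 * to the kernel via KVM_SET_CPUID2. */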
int kvm_arch_init_vcpu(CPUState *env)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[100];
    } __attribute__((packed)) cpuid_data;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
#ifdef KVM_CPUID_SIGNATURE
    uint32_t signature[3];
#endif

    env->mp_state = KVM_MP_STATE_RUNNABLE;

    env->cpuid_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);

    i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
    env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_ECX);
    env->cpuid_ext_features |= i;

    env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
                                                             0, R_EDX);
    env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
                                                             0, R_ECX);
    env->cpuid_svm_features &= kvm_arch_get_supported_cpuid(env, 0x8000000A,
                                                            0, R_EDX);

    cpuid_i = 0;

#ifdef CONFIG_KVM_PARA
    /* Paravirtualization CPUIDs */
    memcpy(signature, "KVMKVMKVM\0\0\0", 12);
    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_SIGNATURE;
    c->eax = 0;
    c->ebx = signature[0];
    c->ecx = signature[1];
    c->edx = signature[2];

    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_FEATURES;
    c->eax = env->cpuid_kvm_features & get_para_features(env);
#endif

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0)
                    break;
                if (i == 0xb && !(c->ecx & 0xff00))
                    break;
                if (i == 0xd && c->eax == 0)
                    break;

                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }
    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    cpuid_data.cpuid.nent = cpuid_i;

#ifdef KVM_CAP_MCE
    if (((env->cpuid_version >> 8)&0xF) >= 6
        && (env->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)
        && kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap;
        int banks;

        if (kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks))
            perror("kvm_get_mce_cap_supported FAILED");
        else {
            if (banks > MCE_BANKS_DEF)
                banks = MCE_BANKS_DEF;
            mcg_cap &= MCE_CAP_DEF;
            mcg_cap |= banks;
            if (kvm_setup_mce(env, &mcg_cap))
                perror("kvm_setup_mce FAILED");
            else
                env->mcg_cap = mcg_cap;
        }
    }
#endif

    return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}

void kvm_arch_reset_vcpu(CPUState *env)
{
    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->nmi_injected = 0;
    env->nmi_pending = 0;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(env) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }
}

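/* KVM_GET_MSR_INDEX_LIST follows the usual two-call protocol: the first
 * call, with nmsrs = 0, fails with E2BIG but fills in the required count;
 * the second call fetches the actual indices. The answer is cached in a
 * static since the supported MSR set cannot change at runtime. */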
static int kvm_has_msr_star(CPUState *env)
{
    static int has_msr_star;
    int ret;

    /* first time */
    if (has_msr_star == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        has_msr_star = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return 0;
        }
        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = qemu_mallocz(MAX(1024, sizeof(msr_list) +
                                              msr_list.nmsrs *
                                              sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = 1;
                    break;
                }
            }
        }

        free(kvm_msr_list);
    }

    if (has_msr_star == 1)
        return 1;
    return 0;
}

static int kvm_init_identity_map_page(KVMState *s)
{
#ifdef KVM_CAP_SET_IDENTITY_MAP_ADDR
    int ret;
    uint64_t addr = 0xfffbc000;

    if (!kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &addr);
    if (ret < 0) {
        fprintf(stderr, "kvm_set_identity_map_addr: %s\n", strerror(ret));
        return ret;
    }
#endif
    return 0;
}

int kvm_arch_init(KVMState *s, int smp_cpus)
{
    int ret;

    /* Create the vm86 TSS. KVM uses vm86 mode to emulate 16-bit code
     * directly. In order to use vm86 mode, a TSS is needed. Since this
     * must be part of guest physical memory, we need to allocate it. Older
     * versions of KVM just assumed that it would be at the end of physical
     * memory but that doesn't work with more than 4GB of memory. We simply
     * refuse to work with those older versions of KVM. */
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
    if (ret <= 0) {
        fprintf(stderr, "kvm does not support KVM_CAP_SET_TSS_ADDR\n");
        return ret;
    }

    /* This address is 3 pages before the BIOS, and the BIOS should present
     * it as unavailable memory. FIXME: need to ensure the e820 map deals
     * with this?
     */
    /*
     * Tell fw_cfg to notify the BIOS to reserve the range.
     */
    if (e820_add_entry(0xfffbc000, 0x4000, E820_RESERVED) < 0) {
        perror("e820_add_entry() table is full");
        exit(1);
    }
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, 0xfffbd000);
    if (ret < 0) {
        return ret;
    }

    return kvm_init_identity_map_page(s);
}

static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = rhs->selector & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags =
        (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->db << DESC_B_SHIFT)
        | (rhs->s * DESC_S_MASK)
        | (rhs->l << DESC_L_SHIFT)
        | (rhs->g * DESC_G_MASK)
        | (rhs->avl * DESC_AVL_MASK);
}

static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set)
        *kvm_reg = *qemu_reg;
    else
        *qemu_reg = *kvm_reg;
}

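/* One accessor for both directions: with 'set' non-zero, QEMU state is
 * copied into struct kvm_regs and pushed via KVM_SET_REGS; with 'set' == 0,
 * KVM_GET_REGS is issued first and the kernel state is copied back out. */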
static int kvm_getput_regs(CPUState *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0)
            return ret;
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set)
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);

    return ret;
}

static int kvm_put_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    for (i = 0; i < 8; ++i)
        fpu.ftwx |= (!env->fptags[i]) << i;
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}

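/* The offsets below index the 32-bit words of kvm_xsave.region[], i.e.
 * byte offset / 4, matching the hardware XSAVE area layout: the legacy
 * FXSAVE image first, XSTATE_BV at byte 512, YMM high halves at byte 576. */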
#ifdef KVM_CAP_XSAVE
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144
#endif

static int kvm_put_xsave(CPUState *env)
{
#ifdef KVM_CAP_XSAVE
    int i, r;
    struct kvm_xsave* xsave;
    uint16_t cwd, swd, twd, fop;

    if (!kvm_has_xsave())
        return kvm_put_fpu(env);

    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    memset(xsave, 0, sizeof(struct kvm_xsave));
    cwd = swd = twd = fop = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i)
        twd |= (!env->fptags[i]) << i;
    xsave->region[0] = (uint32_t)(swd << 16) + cwd;
    xsave->region[1] = (uint32_t)(fop << 16) + twd;
    memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
            sizeof env->fpregs);
    memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
            sizeof env->xmm_regs);
    xsave->region[XSAVE_MXCSR] = env->mxcsr;
    *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
    memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
            sizeof env->ymmh_regs);
    r = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
    qemu_free(xsave);
    return r;
#else
    return kvm_put_fpu(env);
#endif
}

static int kvm_put_xcrs(CPUState *env)
{
#ifdef KVM_CAP_XCRS
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs())
        return 0;

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
#else
    return 0;
#endif
}

static int kvm_put_sregs(CPUState *env)
{
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);

        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs.ss.selector = (sregs.ss.selector & ~3) |
                                (sregs.cs.selector & 3);
            sregs.ss.dpl = sregs.ss.selector & 3;
        }
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env->apic_state);
    sregs.apic_base = cpu_get_apic_base(env->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
}

static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}

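/* Note on 'level': TSC and the kvmclock MSRs are only written back on a
 * full state load (KVM_PUT_FULL_STATE); writing them on every sync would
 * needlessly disturb the guest's time keeping. */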
static int kvm_put_msrs(CPUState *env, int level)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int i, n = 0;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (kvm_has_msr_star(env))
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
#ifdef TARGET_X86_64
    /* FIXME if lm capable */
    kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
    kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
    kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
    kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
#endif
    if (level == KVM_PUT_FULL_STATE) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
                          env->system_time_msr);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
    }
#ifdef KVM_CAP_MCE
    if (env->mcg_cap) {
        if (level == KVM_PUT_RESET_STATE)
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
        else if (level == KVM_PUT_FULL_STATE) {
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
            for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++)
                kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }
#endif

    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
}

static int kvm_get_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    if (ret < 0)
        return ret;

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int kvm_get_xsave(CPUState *env)
{
#ifdef KVM_CAP_XSAVE
    struct kvm_xsave* xsave;
    int ret, i;
    uint16_t cwd, swd, twd, fop;

    if (!kvm_has_xsave())
        return kvm_get_fpu(env);

    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        qemu_free(xsave);
        return ret;
    }

    cwd = (uint16_t)xsave->region[0];
    swd = (uint16_t)(xsave->region[0] >> 16);
    twd = (uint16_t)xsave->region[1];
    fop = (uint16_t)(xsave->region[1] >> 16);
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((twd >> i) & 1);
    env->mxcsr = xsave->region[XSAVE_MXCSR];
    memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
            sizeof env->fpregs);
    memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
            sizeof env->xmm_regs);
    env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
    memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
            sizeof env->ymmh_regs);
    qemu_free(xsave);
    return 0;
#else
    return kvm_get_fpu(env);
#endif
}

static int kvm_get_xcrs(CPUState *env)
{
#ifdef KVM_CAP_XCRS
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs())
        return 0;

    ret = kvm_vcpu_ioctl(env, KVM_GET_XCRS, &xcrs);
    if (ret < 0)
        return ret;

    for (i = 0; i < xcrs.nr_xcrs; i++)
        /* Only support xcr0 now */
        if (xcrs.xcrs[0].xcr == 0) {
            env->xcr0 = xcrs.xcrs[0].value;
            break;
        }
    return 0;
#else
    return 0;
#endif
}

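/* Besides copying the segment and control registers back, recompute the
 * cached env->hflags word (CPL, CS/SS size, LMA, ADDSEG bits) from the
 * freshly fetched state; the rest of QEMU's x86 code relies on it. */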
static int kvm_get_sregs(CPUState *env)
{
    struct kvm_sregs sregs;
    uint32_t hflags;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0)
        return ret;

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env->apic_state, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env->apic_state, sregs.cr8);

#define HFLAG_COPY_MASK ~( \
    HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
    HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
    HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
    HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
              (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
              (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) ||
            (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) <<
                      HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}

static int kvm_get_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    if (kvm_has_msr_star(env))
        msrs[n++].index = MSR_STAR;
    msrs[n++].index = MSR_IA32_TSC;
#ifdef TARGET_X86_64
    /* FIXME lm_capable_kernel */
    msrs[n++].index = MSR_CSTAR;
    msrs[n++].index = MSR_KERNELGSBASE;
    msrs[n++].index = MSR_FMASK;
    msrs[n++].index = MSR_LSTAR;
#endif
    msrs[n++].index = MSR_KVM_SYSTEM_TIME;
    msrs[n++].index = MSR_KVM_WALL_CLOCK;

#ifdef KVM_CAP_MCE
    if (env->mcg_cap) {
        msrs[n++].index = MSR_MCG_STATUS;
        msrs[n++].index = MSR_MCG_CTL;
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++)
            msrs[n++].index = MSR_MC0_CTL + i;
    }
#endif

    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    if (ret < 0)
        return ret;

    for (i = 0; i < ret; i++) {
        switch (msrs[i].index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
#ifdef KVM_CAP_MCE
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
#endif
        default:
#ifdef KVM_CAP_MCE
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
                break;
            }
#endif
        }
    }

    return 0;
}

static int kvm_put_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };

    return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
}

static int kvm_get_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    return 0;
}

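/* events.flags is a validity mask: the NMI-pending and SIPI-vector fields
 * are only marked valid on reset or full state loads, so a plain runtime
 * sync cannot clobber interrupt state the kernel is still tracking. */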
static int kvm_put_vcpu_events(CPUState *env, int level)
{
#ifdef KVM_CAP_VCPU_EVENTS
    struct kvm_vcpu_events events;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);

    events.sipi_vector = env->sipi_vector;

    events.flags = 0;
    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |=
            KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
    }

    return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
#else
    return 0;
#endif
}

static int kvm_get_vcpu_events(CPUState *env)
{
#ifdef KVM_CAP_VCPU_EVENTS
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }
    env->exception_injected =
        events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    env->sipi_vector = events.sipi_vector;
#endif

    return 0;
}

static int kvm_guest_debug_workarounds(CPUState *env)
{
    int ret = 0;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have
     * to reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && env->singlestep_enabled)) {
        ret = kvm_update_guest_debug(env, reinject_trap);
    }
#endif /* KVM_CAP_SET_GUEST_DEBUG */
    return ret;
}

static int kvm_put_debugregs(CPUState *env)
{
#ifdef KVM_CAP_DEBUGREGS
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs);
#else
    return 0;
#endif
}

static int kvm_get_debugregs(CPUState *env)
{
#ifdef KVM_CAP_DEBUGREGS
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;
#endif

    return 0;
}

int kvm_arch_put_registers(CPUState *env, int level)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_self(env));

    ret = kvm_getput_regs(env, 1);
    if (ret < 0)
        return ret;

    ret = kvm_put_xsave(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_xcrs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_msrs(env, level);
    if (ret < 0)
        return ret;

    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(env);
        if (ret < 0)
            return ret;
    }

    ret = kvm_put_vcpu_events(env, level);
    if (ret < 0)
        return ret;

    /* must be last */
    ret = kvm_guest_debug_workarounds(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_debugregs(env);
    if (ret < 0)
        return ret;

    return 0;
}

int kvm_arch_get_registers(CPUState *env)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_self(env));

    ret = kvm_getput_regs(env, 0);
    if (ret < 0)
        return ret;

    ret = kvm_get_xsave(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_xcrs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_msrs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_mp_state(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_vcpu_events(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_debugregs(env);
    if (ret < 0)
        return ret;

    return 0;
}

int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    /* Try to inject an interrupt if the guest can accept it */
    if (run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) {
        int irq;

        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            struct kvm_interrupt intr;
            intr.irq = irq;
            /* FIXME: errors */
            DPRINTF("injected interrupt %d\n", irq);
            kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
        }
    }

    /* If we have an interrupt but the guest is not ready to receive an
     * interrupt, request an interrupt window exit.  This will
     * cause a return to userspace as soon as the guest is ready to
     * receive interrupts. */
    if ((env->interrupt_request & CPU_INTERRUPT_HARD))
        run->request_interrupt_window = 1;
    else
        run->request_interrupt_window = 0;

    DPRINTF("setting tpr\n");
    run->cr8 = cpu_get_apic_tpr(env->apic_state);

    return 0;
}

int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    if (run->if_flag)
        env->eflags |= IF_MASK;
    else
        env->eflags &= ~IF_MASK;

    cpu_set_apic_tpr(env->apic_state, run->cr8);
    cpu_set_apic_base(env->apic_state, run->apic_base);

    return 0;
}

int kvm_arch_process_irqchip_events(CPUState *env)
{
    if (env->interrupt_request & CPU_INTERRUPT_INIT) {
        kvm_cpu_synchronize_state(env);
        do_cpu_init(env);
        env->exception_index = EXCP_HALTED;
    }

    if (env->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(env);
        do_cpu_sipi(env);
    }

    return env->halted;
}

static int kvm_handle_halt(CPUState *env)
{
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
        return 0;
    }

    return 1;
}

int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        ret = kvm_handle_halt(env);
        break;
    }

    return ret;
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1))
        return -EINVAL;
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1))
        return -EINVAL;
    return 0;
}

static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++)
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1))
            return n;
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1))
                return -EINVAL;
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4)
        return -ENOBUFS;

    if (find_hw_breakpoint(addr, len, type) >= 0)
        return -EEXIST;

    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0)
        return -ENOENT;

    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;

int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
{
    int handle = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cpu_single_env->singlestep_enabled)
                handle = 1;
        } else {
            for (n = 0; n < 4; n++)
                if (arch_info->dr6 & (1 << n))
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        handle = 1;
                        break;
                    case 0x1:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
        }
    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc))
        handle = 1;

    if (!handle) {
        cpu_synchronize_state(cpu_single_env);
        assert(cpu_single_env->exception_injected == -1);

        cpu_single_env->exception_injected = arch_info->exception;
        cpu_single_env->has_error_code = 0;
    }

    return handle;
}

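/* DR7 layout used below: 0x0600 sets GE plus the reserved must-be-one bit,
 * (2 << (n * 2)) is the global-enable bit for slot n, and each slot's R/W
 * type and LEN fields occupy bits 16 + n*4 and 18 + n*4 respectively. */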
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(env))
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;

    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                (len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
    /* Legal xcr0 for loading */
    env->xcr0 = 1;
}
#endif /* KVM_CAP_SET_GUEST_DEBUG */

bool kvm_arch_stop_on_emulation_error(CPUState *env)
{
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}

static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}

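/* SIGBUS codes from the kernel's hwpoison handling: BUS_MCEERR_AR (action
 * required) is forwarded to the guest as a fatal SRAR-style machine check,
 * while BUS_MCEERR_AO (action optional, e.g. a patrol scrub error) becomes
 * an SRAO MCE that the guest may be able to survive. */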
int kvm_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
#if defined(KVM_CAP_MCE)
    struct kvm_x86_mce mce = {
        .bank = 9,
    };
    void *vaddr;
    ram_addr_t ram_addr;
    target_phys_addr_t paddr;
    int r;

    if ((env->mcg_cap & MCG_SER_P) && addr
        && (code == BUS_MCEERR_AR
            || code == BUS_MCEERR_AO)) {
        if (code == BUS_MCEERR_AR) {
            /* Fake an Intel architectural Data Load SRAR UCR */
            mce.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
                | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
                | MCI_STATUS_AR | 0x134;
            mce.misc = (MCM_ADDR_PHYS << 6) | 0xc;
            mce.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV;
        } else {
            /*
             * If there is an MCE exception being processed, ignore
             * this SRAO MCE
             */
            r = kvm_mce_in_exception(env);
            if (r == -1) {
                fprintf(stderr, "Failed to get MCE status\n");
            } else if (r) {
                return 0;
            }
            /* Fake an Intel architectural Memory scrubbing UCR */
            mce.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
                | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
                | 0xc0;
            mce.misc = (MCM_ADDR_PHYS << 6) | 0xc;
            mce.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        }
        vaddr = (void *)addr;
        if (qemu_ram_addr_from_host(vaddr, &ram_addr) ||
            !kvm_physical_memory_addr_from_ram(env->kvm_state, ram_addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!\n");
            /* Hope we are lucky for AO MCE */
            if (code == BUS_MCEERR_AO) {
                return 0;
            } else {
                hardware_memory_error();
            }
        }
        mce.addr = paddr;
        r = kvm_set_mce(env, &mce);
        if (r < 0) {
            fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno));
            abort();
        }
    } else
#endif
    {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}

int kvm_on_sigbus(int code, void *addr)
{
#if defined(KVM_CAP_MCE)
    if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
        uint64_t status;
        void *vaddr;
        ram_addr_t ram_addr;
        target_phys_addr_t paddr;
        CPUState *cenv;

        /* Hope we are lucky for AO MCE */
        vaddr = addr;
        if (qemu_ram_addr_from_host(vaddr, &ram_addr) ||
            !kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!: %p\n", addr);
            return 0;
        }
        status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
            | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
            | 0xc0;
        kvm_inject_x86_mce(first_cpu, 9, status,
                           MCG_STATUS_MCIP | MCG_STATUS_RIPV, paddr,
                           (MCM_ADDR_PHYS << 6) | 0xc, 1);
        for (cenv = first_cpu->next_cpu; cenv != NULL; cenv = cenv->next_cpu) {
            kvm_inject_x86_mce(cenv, 1, MCI_STATUS_VAL | MCI_STATUS_UC,
                               MCG_STATUS_MCIP | MCG_STATUS_RIPV, 0, 0, 1);
        }
    } else
#endif
    {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}