/* target-i386/kvm.c @ 6c1f42fe */
/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "kvm.h"
#include "cpu.h"
#include "gdbstub.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

#ifdef KVM_CAP_EXT_CPUID

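/* Ask KVM for its supported-CPUID table with room for at most @max
 * entries.  Returns NULL on E2BIG so the caller can retry with a
 * larger buffer. */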
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = (struct kvm_cpuid2 *)qemu_mallocz(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            qemu_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

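/* Return the host/KVM-supported value of @reg (R_EAX..R_EDX) for CPUID
 * @function, doubling the query buffer until the whole table fits. */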
uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
{
    struct kvm_cpuid2 *cpuid;
    int i, max;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;

    if (!kvm_check_extension(env->kvm_state, KVM_CAP_EXT_CPUID)) {
        return -1U;
    }

    max = 1;
    while ((cpuid = try_get_cpuid(env->kvm_state, max)) == NULL) {
        max *= 2;
    }

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function) {
            switch (reg) {
            case R_EAX:
                ret = cpuid->entries[i].eax;
                break;
            case R_EBX:
                ret = cpuid->entries[i].ebx;
                break;
            case R_ECX:
                ret = cpuid->entries[i].ecx;
                break;
            case R_EDX:
                ret = cpuid->entries[i].edx;
                if (function == 0x80000001) {
                    /* On Intel, kvm returns cpuid according to the Intel spec,
                     * so add missing bits according to the AMD spec:
                     */
                    cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, R_EDX);
                    ret |= cpuid_1_edx & 0xdfeff7ff;
                }
                break;
            }
        }
    }

    qemu_free(cpuid);

    return ret;
}

#else

uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
{
    return -1U;
}

#endif

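/* Drop every bit from @features that KVM does not report as supported,
 * so the guest never sees a CPUID feature we cannot virtualize. */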
static void kvm_trim_features(uint32_t *features, uint32_t supported)
{
    int i;
    uint32_t mask;

    for (i = 0; i < 32; ++i) {
        mask = 1U << i;
        if ((*features & mask) && !(supported & mask)) {
            *features &= ~mask;
        }
    }
}

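/* Build the guest CPUID table, trimmed to what KVM supports, and
 * install it with KVM_SET_CPUID2.  The stateful function 2 and the
 * index-significant functions 4/0xb/0xd are expanded entry by entry.
 * (KVM_CPUID_FLAG_SIGNIFCANT_INDEX is spelled that way in the kernel
 * header.) */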
int kvm_arch_init_vcpu(CPUState *env)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[100];
    } __attribute__((packed)) cpuid_data;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;

    env->mp_state = KVM_MP_STATE_RUNNABLE;

    kvm_trim_features(&env->cpuid_features,
                      kvm_arch_get_supported_cpuid(env, 1, R_EDX));
    kvm_trim_features(&env->cpuid_ext_features,
                      kvm_arch_get_supported_cpuid(env, 1, R_ECX));
    kvm_trim_features(&env->cpuid_ext2_features,
                      kvm_arch_get_supported_cpuid(env, 0x80000001, R_EDX));
    kvm_trim_features(&env->cpuid_ext3_features,
                      kvm_arch_get_supported_cpuid(env, 0x80000001, R_ECX));

    cpuid_i = 0;

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        struct kvm_cpuid_entry2 *c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0)
                    break;
                if (i == 0xb && !(c->ecx & 0xff00))
                    break;
                if (i == 0xd && c->eax == 0)
                    break;

                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }
    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        struct kvm_cpuid_entry2 *c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    cpuid_data.cpuid.nent = cpuid_i;

    return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}

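/* Query KVM_GET_MSR_INDEX_LIST once and cache whether the kernel lists
 * MSR_STAR among the MSRs to save/restore. */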
static int kvm_has_msr_star(CPUState *env)
{
    static int has_msr_star;
    int ret;

    /* first time */
    if (has_msr_star == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        has_msr_star = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore.  The first call, with nmsrs == 0, fails with
         * E2BIG but fills in the required count. */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG)
            return 0;

        kvm_msr_list = qemu_mallocz(sizeof(msr_list) +
                                    msr_list.nmsrs * sizeof(msr_list.indices[0]));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = 1;
                    break;
                }
            }
        }

        qemu_free(kvm_msr_list);
    }

    if (has_msr_star == 1)
        return 1;
    return 0;
}

int kvm_arch_init(KVMState *s, int smp_cpus)
{
    int ret;

    /* create vm86 tss.  KVM uses vm86 mode to emulate 16-bit code
     * directly.  In order to use vm86 mode, a TSS is needed.  Since this
     * must be part of guest physical memory, we need to allocate it.  Older
     * versions of KVM just assumed that it would be at the end of physical
     * memory but that doesn't work with more than 4GB of memory.  We simply
     * refuse to work with those older versions of KVM. */
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
    if (ret <= 0) {
        fprintf(stderr, "kvm does not support KVM_CAP_SET_TSS_ADDR\n");
        return ret;
    }

    /* this address is 3 pages before the bios, and the bios should present
     * as unavailable memory.  FIXME, need to ensure the e820 map deals with
     * this?
     */
    return kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, 0xfffbd000);
}

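/* Converters between QEMU's SegmentCache and KVM's kvm_segment.  The
 * v8086 variant forces the fixed attributes that vm86-mode segments
 * have (type 3, DPL 3, present, 16-bit). */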
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = rhs->selector & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags =
        (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->db << DESC_B_SHIFT)
        | (rhs->s * DESC_S_MASK)
        | (rhs->l << DESC_L_SHIFT)
        | (rhs->g * DESC_G_MASK)
        | (rhs->avl * DESC_AVL_MASK);
}

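/* Copy one register in the direction selected by @set: QEMU to KVM when
 * set is nonzero, KVM to QEMU otherwise.  This lets kvm_getput_regs()
 * share one field list between KVM_GET_REGS and KVM_SET_REGS. */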
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set)
        *kvm_reg = *qemu_reg;
    else
        *qemu_reg = *kvm_reg;
}

static int kvm_getput_regs(CPUState *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0)
            return ret;
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set)
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);

    return ret;
}

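/* Pack QEMU's FPU state into struct kvm_fpu: the stack top goes into
 * FSW bits 11-13 and QEMU's inverted per-register tags are folded into
 * the one-bit-per-register abridged FTW byte. */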
static int kvm_put_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    for (i = 0; i < 8; ++i)
        fpu.ftwx |= (!env->fptags[i]) << i;
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}

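/* Push segment registers, descriptor tables, control registers, TPR and
 * APIC base to KVM.  In vm86 mode the segments get fixed v8086
 * attributes instead of their cached descriptor flags. */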
static int kvm_put_sregs(CPUState *env)
{
    struct kvm_sregs sregs;

    memcpy(sregs.interrupt_bitmap,
           env->interrupt_bitmap,
           sizeof(sregs.interrupt_bitmap));

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);

        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs.ss.selector = (sregs.ss.selector & ~3) |
                                (sregs.cs.selector & 3);
            sregs.ss.dpl = sregs.ss.selector & 3;
        }
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env);
    sregs.apic_base = cpu_get_apic_base(env);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
}

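/* Fill one kvm_msr_entry; kvm_put_msrs() below batches the MSRs QEMU
 * tracks into a single KVM_SET_MSRS call. */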
static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}

static int kvm_put_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (kvm_has_msr_star(env))
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
#ifdef TARGET_X86_64
    /* FIXME if lm capable */
    kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
    kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
    kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
    kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
#endif
    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
}

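/* Read the FPU state back from KVM, undoing the packing in
 * kvm_put_fpu(): extract the stack top from FSW and re-invert the
 * abridged tag bits into env->fptags[]. */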
static int kvm_get_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    if (ret < 0)
        return ret;

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}

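/* Read segment and control registers back from KVM, then recompute the
 * hflags bits QEMU derives from them (CPL, long mode, code/stack size,
 * ADDSEG) while preserving the bits not covered by HFLAG_COPY_MASK. */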
static int kvm_get_sregs(CPUState *env)
{
    struct kvm_sregs sregs;
    uint32_t hflags;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0)
        return ret;

    memcpy(env->interrupt_bitmap,
           sregs.interrupt_bitmap,
           sizeof(sregs.interrupt_bitmap));

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env, sregs.cr8);

#define HFLAG_COPY_MASK ~( \
        HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
        HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
        HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
        HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
              (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
              (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) ||
            (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) <<
                      HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}

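/* Fetch the tracked MSRs in one KVM_GET_MSRS call; the ioctl returns
 * how many entries were filled, and only those are copied into env. */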
static int kvm_get_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    if (kvm_has_msr_star(env))
        msrs[n++].index = MSR_STAR;
    msrs[n++].index = MSR_IA32_TSC;
#ifdef TARGET_X86_64
    /* FIXME lm_capable_kernel */
    msrs[n++].index = MSR_CSTAR;
    msrs[n++].index = MSR_KERNELGSBASE;
    msrs[n++].index = MSR_FMASK;
    msrs[n++].index = MSR_LSTAR;
#endif
    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    if (ret < 0)
        return ret;

    for (i = 0; i < ret; i++) {
        switch (msrs[i].index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        }
    }

    return 0;
}

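/* Push the full vcpu state (GPRs, FPU, sregs, MSRs, mp_state) into the
 * kernel, then read mp_state back; the first failing step returns its
 * error code. */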
int kvm_arch_put_registers(CPUState *env)
{
    int ret;

    ret = kvm_getput_regs(env, 1);
    if (ret < 0)
        return ret;

    ret = kvm_put_fpu(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_msrs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_mp_state(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_mp_state(env);
    if (ret < 0)
        return ret;

    return 0;
}

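/* Counterpart of kvm_arch_put_registers(): pull GPRs, FPU state, sregs
 * and MSRs out of the kernel into env. */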
int kvm_arch_get_registers(CPUState *env)
{
    int ret;

    ret = kvm_getput_regs(env, 0);
    if (ret < 0)
        return ret;

    ret = kvm_get_fpu(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_msrs(env);
    if (ret < 0)
        return ret;

    return 0;
}

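/* Runs before each KVM_RUN: inject a pending PIC interrupt if the guest
 * can take it now, otherwise request an interrupt-window exit, and
 * refresh the TPR in cr8. */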
int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    /* Try to inject an interrupt if the guest can accept it */
    if (run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) {
        int irq;

        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            struct kvm_interrupt intr;
            intr.irq = irq;
            /* FIXME: errors */
            dprintf("injected interrupt %d\n", irq);
            kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
        }
    }

    /* If we have an interrupt but the guest is not ready to receive an
     * interrupt, request an interrupt window exit.  This will
     * cause a return to userspace as soon as the guest is ready to
     * receive interrupts. */
    if ((env->interrupt_request & CPU_INTERRUPT_HARD))
        run->request_interrupt_window = 1;
    else
        run->request_interrupt_window = 0;

    dprintf("setting tpr\n");
    run->cr8 = cpu_get_apic_tpr(env);

    return 0;
}

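/* Runs after each KVM_RUN exit: propagate state the kernel may have
 * changed, i.e. the guest's IF flag, the TPR (cr8) and the APIC base. */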
int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    if (run->if_flag)
        env->eflags |= IF_MASK;
    else
        env->eflags &= ~IF_MASK;

    cpu_set_apic_tpr(env, run->cr8);
    cpu_set_apic_base(env, run->apic_base);

    return 0;
}

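/* On a HLT exit, halt the vcpu unless an interrupt or NMI is already
 * pending; returns 0 to stop the vcpu loop, 1 to keep running. */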
static int kvm_handle_halt(CPUState *env)
{
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
        return 0;
    }

    return 1;
}

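/* Arch-specific exit dispatch; only KVM_EXIT_HLT needs handling here. */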
int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        dprintf("handle_hlt\n");
        ret = kvm_handle_halt(env);
        break;
    }

    return ret;
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
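/* Software breakpoints: save the original byte at bp->pc and replace it
 * with int3 (0xcc); removal checks the int3 is still in place before
 * restoring the saved byte. */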
int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1))
        return -EINVAL;
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1))
        return -EINVAL;
    return 0;
}

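/* x86 has four hardware debug address registers (DR0-DR3), so at most
 * four hardware breakpoints/watchpoints can be armed at once. */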
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++)
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1))
            return n;
    return -1;
}

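/* Validate and record a hardware breakpoint request from gdb: code
 * breakpoints always use length 1; watchpoints must be 1, 2, 4 or 8
 * bytes long and naturally aligned. */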
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1))
                return -EINVAL;
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4)
        return -ENOBUFS;

    if (find_hw_breakpoint(addr, len, type) >= 0)
        return -EEXIST;

    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0)
        return -ENOENT;

    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;

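/* Decide whether a debug exit is ours: DR6 bit 14 (BS) flags a
 * single-step we requested, DR6 bits 0-3 identify a hardware
 * break/watchpoint slot, and unclaimed exceptions are re-injected into
 * the guest. */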
int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
{
    int handle = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cpu_single_env->singlestep_enabled)
                handle = 1;
        } else {
            for (n = 0; n < 4; n++)
                if (arch_info->dr6 & (1 << n))
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        handle = 1;
                        break;
                    case 0x1:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
        }
    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc))
        handle = 1;

    if (!handle)
        kvm_update_guest_debug(cpu_single_env,
                               (arch_info->exception == 1) ?
                               KVM_GUESTDBG_INJECT_DB : KVM_GUESTDBG_INJECT_BP);

    return handle;
}

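/* Build the kvm_guest_debug block: enable software and/or hardware
 * breakpoints, load DR0-DR3 with the armed addresses and encode each
 * slot's enable, type and length fields into DR7. */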
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(env))
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;

    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                (len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
#endif /* KVM_CAP_SET_GUEST_DEBUG */