/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <errno.h>      /* E2BIG, ENOSYS */

#include <linux/kvm.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "kvm.h"
#include "cpu.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

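/* Mirror QEMU's emulated CPUID into KVM: walk the basic leaves (0..limit
 * reported by leaf 0) and the extended leaves (0x80000000..limit reported
 * by leaf 0x80000000), record each one in a kvm_cpuid_entry, and hand the
 * whole table to the kernel in a single KVM_SET_CPUID call. */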
int kvm_arch_init_vcpu(CPUState *env)
{
    struct {
        struct kvm_cpuid cpuid;
        struct kvm_cpuid_entry entries[100];
    } __attribute__((packed)) cpuid_data;
    int cpuid_i;
    /* unsigned, not int: the extended leaf numbers starting at
     * 0x80000000 do not fit in a signed 32-bit int */
    uint32_t limit, i, eax, ebx, ecx, edx;

    cpuid_i = 0;

    cpu_x86_cpuid(env, 0, &eax, &ebx, &ecx, &edx);
    limit = eax;

    for (i = 0; i <= limit; i++) {
        struct kvm_cpuid_entry *c = &cpuid_data.entries[cpuid_i++];

        cpu_x86_cpuid(env, i, &eax, &ebx, &ecx, &edx);
        c->function = i;
        c->eax = eax;
        c->ebx = ebx;
        c->ecx = ecx;
        c->edx = edx;
    }

    cpu_x86_cpuid(env, 0x80000000, &eax, &ebx, &ecx, &edx);
    limit = eax;

    for (i = 0x80000000; i <= limit; i++) {
        struct kvm_cpuid_entry *c = &cpuid_data.entries[cpuid_i++];

        cpu_x86_cpuid(env, i, &eax, &ebx, &ecx, &edx);
        c->function = i;
        c->eax = eax;
        c->ebx = ebx;
        c->ecx = ecx;
        c->edx = edx;
    }

    cpuid_data.cpuid.nent = cpuid_i;

    return kvm_vcpu_ioctl(env, KVM_SET_CPUID, &cpuid_data);
}

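/* Probe whether the kernel's MSR save/restore list contains MSR_STAR,
 * caching the answer in a static (0 = not probed yet, -1 = absent,
 * 1 = present).  The put/get paths below only include MSR_STAR in
 * KVM_SET_MSRS/KVM_GET_MSRS when the kernel actually lists it. */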
static int kvm_has_msr_star(CPUState *env)
{
    static int has_msr_star;
    int ret;

    /* first time */
    if (has_msr_star == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        has_msr_star = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore.  The first call only asks for the count: the
         * kernel fills in msr_list.nmsrs and fails the ioctl with E2BIG
         * because the zero-length indices array is too small. */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG)
            return 0;

        kvm_msr_list = qemu_mallocz(sizeof(msr_list) +
                                    msr_list.nmsrs * sizeof(msr_list.indices[0]));
        if (kvm_msr_list == NULL)
            return 0;

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = 1;
                    break;
                }
            }
        }

        free(kvm_msr_list);
    }

    if (has_msr_star == 1)
        return 1;
    return 0;
}

int kvm_arch_init(KVMState *s, int smp_cpus)
{
    int ret;

    /* create vm86 tss.  KVM uses vm86 mode to emulate 16-bit code
     * directly.  In order to use vm86 mode, a TSS is needed.  Since this
     * must be part of guest physical memory, we need to allocate it.  Older
     * versions of KVM just assumed that it would be at the end of physical
     * memory but that doesn't work with more than 4GB of memory.  We simply
     * refuse to work with those older versions of KVM. */
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, (void *)KVM_CAP_SET_TSS_ADDR);
    if (ret <= 0) {
        fprintf(stderr, "kvm does not support KVM_CAP_SET_TSS_ADDR\n");
        /* ret is 0 when the extension is merely absent; do not let that
         * look like success to our caller */
        return ret < 0 ? ret : -ENOSYS;
    }

    /* this address is 3 pages before the bios, and the bios should present
     * it as unavailable memory.  FIXME, need to ensure the e820 map deals
     * with this?
     */
    return kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, (void *)0xfffbd000);
}

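/* Translate segment state between QEMU's SegmentCache, which keeps a
 * selector plus the raw descriptor flags word, and KVM's kvm_segment,
 * which wants each attribute as a separate bit field.  set_v8086_seg()
 * is the virtual-8086 special case: there every segment is a present
 * 16-bit DPL-3 segment of type 3 (read/write data), regardless of the
 * cached flags. */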
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = rhs->selector & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags =
        (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->db << DESC_B_SHIFT)
        | (rhs->s * DESC_S_MASK)
        | (rhs->l << DESC_L_SHIFT)
        | (rhs->g * DESC_G_MASK)
        | (rhs->avl * DESC_AVL_MASK);
}

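/* One code path copies the general-purpose registers in both directions:
 * 'set' selects which side of each pair is the destination, so the
 * register list in kvm_getput_regs() only has to be written once. */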
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set)
        *kvm_reg = *qemu_reg;
    else
        *qemu_reg = *kvm_reg;
}

static int kvm_getput_regs(CPUState *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0)
            return ret;
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set)
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);

    return ret;
}

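/* Load QEMU's FPU/SSE state into KVM.  QEMU keeps the x87 status split
 * apart (env->fpus without TOP, env->fpstt for TOP, one fptags entry per
 * register), so it is recombined here: TOP goes back into FSW bits 11-13
 * and the tags are condensed into the abridged FXSAVE-style FTW byte,
 * where a set bit means "valid" rather than "empty". */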
static int kvm_put_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    for (i = 0; i < 8; ++i)
        fpu.ftwx |= (!env->fptags[i]) << i;
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}

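/* Load the system registers into KVM: segments (with the v8086 special
 * case), the descriptor tables, control registers, APIC TPR/base and
 * EFER.  In protected mode the SS selector RPL and DPL are forced to the
 * CS CPL, presumably so the guest state passes the hardware consistency
 * checks even when QEMU's softmmu state is momentarily inconsistent. */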
static int kvm_put_sregs(CPUState *env)
{
    struct kvm_sregs sregs;

    memcpy(sregs.interrupt_bitmap,
           env->interrupt_bitmap,
           sizeof(sregs.interrupt_bitmap));

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);

        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs.ss.selector = (sregs.ss.selector & ~3) |
                    (sregs.cs.selector & 3);
            sregs.ss.dpl = sregs.ss.selector & 3;
        }
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env);
    sregs.apic_base = cpu_get_apic_base(env);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
}

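/* Write the MSRs that QEMU tracks to KVM in one KVM_SET_MSRS call: the
 * SYSENTER trio, MSR_STAR when the kernel can save/restore it, the TSC,
 * and on 64-bit targets the syscall/swapgs MSRs. */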
static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}

static int kvm_put_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (kvm_has_msr_star(env))
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
#ifdef TARGET_X86_64
    /* FIXME if lm capable */
    kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
    kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
    kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
    kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
#endif
    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
}

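/* Fetch FPU/SSE state from KVM; the exact inverse of kvm_put_fpu(): TOP
 * is extracted from FSW bits 11-13 and each abridged FTW bit is inverted
 * back into QEMU's "1 = empty" fptags convention. */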
static int kvm_get_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    if (ret < 0)
        return ret;

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}

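/* Fetch the system registers from KVM and rebuild everything QEMU
 * derives from them.  Most of the work is recomputing env->hflags (CPL,
 * PE/MP/EM/TS, long-mode and segment-size bits, ADDSEG) from the freshly
 * read control registers and segment descriptors, and splitting EFLAGS
 * back into QEMU's lazy cc_src/cc_op and df representation. */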
static int kvm_get_sregs(CPUState *env)
{
    struct kvm_sregs sregs;
    uint32_t hflags;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0)
        return ret;

    memcpy(env->interrupt_bitmap,
           sregs.interrupt_bitmap,
           sizeof(sregs.interrupt_bitmap));

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env, sregs.cr8);

#define HFLAG_COPY_MASK ~( \
        HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
        HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
        HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
        HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
            (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
            (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) ||
            (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) <<
                       HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
    env->cc_src = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    env->cc_op = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);

    return 0;
}

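/* Read back the same MSR set that kvm_put_msrs() writes.  KVM_GET_MSRS
 * returns the number of MSRs actually fetched, so the loop dispatches on
 * each returned index instead of assuming a fixed order. */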
static int kvm_get_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    if (kvm_has_msr_star(env))
        msrs[n++].index = MSR_STAR;
    msrs[n++].index = MSR_IA32_TSC;
#ifdef TARGET_X86_64
    /* FIXME lm_capable_kernel */
    msrs[n++].index = MSR_CSTAR;
    msrs[n++].index = MSR_KERNELGSBASE;
    msrs[n++].index = MSR_FMASK;
    msrs[n++].index = MSR_LSTAR;
#endif
    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    if (ret < 0)
        return ret;

    for (i = 0; i < ret; i++) {
        switch (msrs[i].index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        }
    }

    return 0;
}

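/* Push all vcpu state to KVM before KVM_RUN, and pull it all back after
 * an exit: general-purpose registers, FPU/SSE, system registers, then
 * MSRs, bailing out on the first failure. */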
int kvm_arch_put_registers(CPUState *env)
{
    int ret;

    ret = kvm_getput_regs(env, 1);
    if (ret < 0)
        return ret;

    ret = kvm_put_fpu(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_msrs(env);
    if (ret < 0)
        return ret;

    return 0;
}

int kvm_arch_get_registers(CPUState *env)
{
    int ret;

    ret = kvm_getput_regs(env, 0);
    if (ret < 0)
        return ret;

    ret = kvm_get_fpu(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_msrs(env);
    if (ret < 0)
        return ret;

    return 0;
}

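/* Called before every KVM_RUN.  Injects a pending PIC interrupt when the
 * guest can accept one (injection slot free and IF set); otherwise it
 * requests an interrupt-window exit so control returns to userspace as
 * soon as injection becomes possible.  The current APIC TPR is also
 * forwarded so the kernel sees CR8 changes. */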
int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    /* Try to inject an interrupt if the guest can accept it */
    if (run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) {
        int irq;

        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            struct kvm_interrupt intr;

            intr.irq = irq;
            /* FIXME: errors */
            dprintf("injected interrupt %d\n", irq);
            kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
        }
    }

    /* If we have an interrupt but the guest is not ready to receive an
     * interrupt, request an interrupt window exit.  This will
     * cause a return to userspace as soon as the guest is ready to
     * receive interrupts. */
    if ((env->interrupt_request & CPU_INTERRUPT_HARD))
        run->request_interrupt_window = 1;
    else
        run->request_interrupt_window = 0;

    dprintf("setting tpr\n");
    run->cr8 = cpu_get_apic_tpr(env);

    return 0;
}

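/* Called after every KVM_RUN exit: sync back the guest's interrupt flag,
 * TPR (CR8) and APIC base, which KVM reports in the shared kvm_run
 * structure. */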
int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    if (run->if_flag)
        env->eflags |= IF_MASK;
    else
        env->eflags &= ~IF_MASK;

    cpu_set_apic_tpr(env, run->cr8);
    cpu_set_apic_base(env, run->apic_base);

    return 0;
}

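/* A HLT exit only halts the vcpu if nothing can wake it up right away;
 * with an unmasked pending hard interrupt or a pending NMI, execution
 * simply continues.  Returns 0 to stop the vcpu (halted) and 1 to keep
 * running. */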
static int kvm_handle_halt(CPUState *env)
{
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
        return 0;
    }

    return 1;
}

int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        dprintf("handle_hlt\n");
        ret = kvm_handle_halt(env);
        break;
    }

    return ret;
}