root / target-i386 / machine.c @ 216c07c3
History | View | Annotate | Download (12 kB)
1 |
#include "hw/hw.h" |
---|---|
2 |
#include "hw/boards.h" |
3 |
#include "hw/pc.h" |
4 |
#include "hw/isa.h" |
5 |
#include "host-utils.h" |
6 |
|
7 |
#include "exec-all.h" |
8 |
#include "kvm.h" |
9 |
|
10 |
/* Wire layout of one x86 segment/descriptor-table register cache.
   Used by cpu_put_seg()/cpu_get_seg() for the segment registers,
   LDT, TR, GDT and IDT fields of CPUState. */
static const VMStateDescription vmstate_segment = {
    .name = "segment",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(selector, SegmentCache),
        VMSTATE_UINTTL(base, SegmentCache),  /* target_ulong sized */
        VMSTATE_UINT32(limit, SegmentCache),
        VMSTATE_UINT32(flags, SegmentCache),
        VMSTATE_END_OF_LIST()
    }
};
23 |
|
24 |
/* Serialize one segment register cache to the migration stream. */
static void cpu_put_seg(QEMUFile *f, SegmentCache *dt)
{
    vmstate_save_state(f, &vmstate_segment, dt);
}
28 |
|
29 |
/* Deserialize one segment register cache from the migration stream,
   always using the current vmstate_segment version. */
static void cpu_get_seg(QEMUFile *f, SegmentCache *dt)
{
    vmstate_load_state(f, &vmstate_segment, dt, vmstate_segment.version_id);
}
33 |
|
34 |
/* Wire layout of one 128-bit SSE register, saved as two 64-bit halves. */
static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT64(XMM_Q(0), XMMReg),  /* low quadword */
        VMSTATE_UINT64(XMM_Q(1), XMMReg),  /* high quadword */
        VMSTATE_END_OF_LIST()
    }
};
45 |
|
46 |
/* Serialize one XMM register to the migration stream. */
static void cpu_put_xmm_reg(QEMUFile *f, XMMReg *xmm_reg)
{
    vmstate_save_state(f, &vmstate_xmm_reg, xmm_reg);
}
50 |
|
51 |
/* Deserialize one XMM register from the migration stream. */
static void cpu_get_xmm_reg(QEMUFile *f, XMMReg *xmm_reg)
{
    vmstate_load_state(f, &vmstate_xmm_reg, xmm_reg, vmstate_xmm_reg.version_id);
}
55 |
|
56 |
/* Wire layout of one variable-range MTRR (base/mask MSR pair). */
static const VMStateDescription vmstate_mtrr_var = {
    .name = "mtrr_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT64(base, MTRRVar),
        VMSTATE_UINT64(mask, MTRRVar),
        VMSTATE_END_OF_LIST()
    }
};
67 |
|
68 |
/* Serialize one variable-range MTRR to the migration stream. */
static void cpu_put_mtrr_var(QEMUFile *f, MTRRVar *mtrr_var)
{
    vmstate_save_state(f, &vmstate_mtrr_var, mtrr_var);
}
72 |
|
73 |
/* Deserialize one variable-range MTRR from the migration stream. */
static void cpu_get_mtrr_var(QEMUFile *f, MTRRVar *mtrr_var)
{
    vmstate_load_state(f, &vmstate_mtrr_var, mtrr_var, vmstate_mtrr_var.version_id);
}
77 |
|
78 |
static void cpu_pre_save(void *opaque) |
79 |
{ |
80 |
CPUState *env = opaque; |
81 |
int i, bit;
|
82 |
|
83 |
cpu_synchronize_state(env); |
84 |
|
85 |
/* FPU */
|
86 |
env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; |
87 |
env->fptag_vmstate = 0;
|
88 |
for(i = 0; i < 8; i++) { |
89 |
env->fptag_vmstate |= ((!env->fptags[i]) << i); |
90 |
} |
91 |
|
92 |
#ifdef USE_X86LDOUBLE
|
93 |
env->fpregs_format_vmstate = 0;
|
94 |
#else
|
95 |
env->fpregs_format_vmstate = 1;
|
96 |
#endif
|
97 |
|
98 |
/* There can only be one pending IRQ set in the bitmap at a time, so try
|
99 |
to find it and save its number instead (-1 for none). */
|
100 |
env->pending_irq_vmstate = -1;
|
101 |
for (i = 0; i < ARRAY_SIZE(env->interrupt_bitmap); i++) { |
102 |
if (env->interrupt_bitmap[i]) {
|
103 |
bit = ctz64(env->interrupt_bitmap[i]); |
104 |
env->pending_irq_vmstate = i * 64 + bit;
|
105 |
break;
|
106 |
} |
107 |
} |
108 |
} |
109 |
|
110 |
/* Serialize the whole x86 CPU state to the migration stream.
   NOTE: the field order here defines the wire format and must match
   cpu_load() exactly; cpu_pre_save() fills in the *_vmstate fields. */
void cpu_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;
    int i;

    cpu_pre_save(opaque);

    /* General purpose registers, instruction pointer, flags */
    for(i = 0; i < CPU_NB_REGS; i++)
        qemu_put_betls(f, &env->regs[i]);
    qemu_put_betls(f, &env->eip);
    qemu_put_betls(f, &env->eflags);
    qemu_put_be32s(f, &env->hflags);

    /* FPU */
    qemu_put_be16s(f, &env->fpuc);
    qemu_put_be16s(f, &env->fpus_vmstate);
    qemu_put_be16s(f, &env->fptag_vmstate);

    /* 0 = 80-bit registers follow, 1 = 64-bit doubles follow */
    qemu_put_be16s(f, &env->fpregs_format_vmstate);

    for(i = 0; i < 8; i++) {
#ifdef USE_X86LDOUBLE
        {
            uint64_t mant;
            uint16_t exp;
            /* we save the real CPU data (in case of MMX usage only 'mant'
               contains the MMX register */
            cpu_get_fp80(&mant, &exp, env->fpregs[i].d);
            qemu_put_be64(f, mant);
            qemu_put_be16(f, exp);
        }
#else
        /* if we use doubles for float emulation, we save the doubles to
           avoid losing information in case of MMX usage. It can give
           problems if the image is restored on a CPU where long
           doubles are used instead. */
        qemu_put_be64(f, env->fpregs[i].mmx.MMX_Q(0));
#endif
    }

    /* Segment registers and descriptor-table registers */
    for(i = 0; i < 6; i++)
        cpu_put_seg(f, &env->segs[i]);
    cpu_put_seg(f, &env->ldt);
    cpu_put_seg(f, &env->tr);
    cpu_put_seg(f, &env->gdt);
    cpu_put_seg(f, &env->idt);

    /* SYSENTER MSRs */
    qemu_put_be32s(f, &env->sysenter_cs);
    qemu_put_betls(f, &env->sysenter_esp);
    qemu_put_betls(f, &env->sysenter_eip);

    /* Control registers (CR1 does not exist) */
    qemu_put_betls(f, &env->cr[0]);
    qemu_put_betls(f, &env->cr[2]);
    qemu_put_betls(f, &env->cr[3]);
    qemu_put_betls(f, &env->cr[4]);

    /* Debug registers */
    for(i = 0; i < 8; i++)
        qemu_put_betls(f, &env->dr[i]);

    /* MMU */
    qemu_put_sbe32s(f, &env->a20_mask);

    /* XMM */
    qemu_put_be32s(f, &env->mxcsr);
    for(i = 0; i < CPU_NB_REGS; i++) {
        cpu_put_xmm_reg(f, &env->xmm_regs[i]);
    }

#ifdef TARGET_X86_64
    /* 64-bit only MSRs (syscall/sysret, GS base swap) */
    qemu_put_be64s(f, &env->efer);
    qemu_put_be64s(f, &env->star);
    qemu_put_be64s(f, &env->lstar);
    qemu_put_be64s(f, &env->cstar);
    qemu_put_be64s(f, &env->fmask);
    qemu_put_be64s(f, &env->kernelgsbase);
#endif
    qemu_put_be32s(f, &env->smbase);

    qemu_put_be64s(f, &env->pat);
    qemu_put_be32s(f, &env->hflags2);

    /* SVM (nested virtualization) state */
    qemu_put_be64s(f, &env->vm_hsave);
    qemu_put_be64s(f, &env->vm_vmcb);
    qemu_put_be64s(f, &env->tsc_offset);
    qemu_put_be64s(f, &env->intercept);
    qemu_put_be16s(f, &env->intercept_cr_read);
    qemu_put_be16s(f, &env->intercept_cr_write);
    qemu_put_be16s(f, &env->intercept_dr_read);
    qemu_put_be16s(f, &env->intercept_dr_write);
    qemu_put_be32s(f, &env->intercept_exceptions);
    qemu_put_8s(f, &env->v_tpr);

    /* MTRRs */
    for(i = 0; i < 11; i++)
        qemu_put_be64s(f, &env->mtrr_fixed[i]);
    qemu_put_be64s(f, &env->mtrr_deftype);
    for(i = 0; i < 8; i++) {
        cpu_put_mtrr_var(f, &env->mtrr_var[i]);
    }

    /* KVM-related states */

    qemu_put_sbe32s(f, &env->pending_irq_vmstate);
    qemu_put_be32s(f, &env->mp_state);
    qemu_put_be64s(f, &env->tsc);

    /* MCE */
    qemu_put_be64s(f, &env->mcg_cap);
    qemu_put_be64s(f, &env->mcg_status);
    qemu_put_be64s(f, &env->mcg_ctl);
    for (i = 0; i < MCE_BANKS_DEF * 4; i++) {
        qemu_put_be64s(f, &env->mce_banks[i]);
    }
    qemu_put_be64s(f, &env->tsc_aux);
}
225 |
|
226 |
#ifdef USE_X86LDOUBLE
|
227 |
/* XXX: add that in a FPU generic layer */
|
228 |
union x86_longdouble {
|
229 |
uint64_t mant; |
230 |
uint16_t exp; |
231 |
}; |
232 |
|
233 |
#define MANTD1(fp) (fp & ((1LL << 52) - 1)) |
234 |
#define EXPBIAS1 1023 |
235 |
#define EXPD1(fp) ((fp >> 52) & 0x7FF) |
236 |
#define SIGND1(fp) ((fp >> 32) & 0x80000000) |
237 |
|
238 |
static void fp64_to_fp80(union x86_longdouble *p, uint64_t temp) |
239 |
{ |
240 |
int e;
|
241 |
/* mantissa */
|
242 |
p->mant = (MANTD1(temp) << 11) | (1LL << 63); |
243 |
/* exponent + sign */
|
244 |
e = EXPD1(temp) - EXPBIAS1 + 16383;
|
245 |
e |= SIGND1(temp) >> 16;
|
246 |
p->exp = e; |
247 |
} |
248 |
#endif
|
249 |
|
250 |
/* Sync state from the accelerator (e.g. KVM) into CPUState before the
   incoming image overwrites it. Always succeeds. */
static int cpu_pre_load(void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
    return 0;
}
257 |
|
258 |
static int cpu_post_load(void *opaque, int version_id) |
259 |
{ |
260 |
CPUState *env = opaque; |
261 |
int i;
|
262 |
|
263 |
/* XXX: restore FPU round state */
|
264 |
env->fpstt = (env->fpus_vmstate >> 11) & 7; |
265 |
env->fpus = env->fpus_vmstate & ~0x3800;
|
266 |
env->fptag_vmstate ^= 0xff;
|
267 |
for(i = 0; i < 8; i++) { |
268 |
env->fptags[i] = (env->fptag_vmstate >> i) & 1;
|
269 |
} |
270 |
|
271 |
cpu_breakpoint_remove_all(env, BP_CPU); |
272 |
cpu_watchpoint_remove_all(env, BP_CPU); |
273 |
for (i = 0; i < 4; i++) |
274 |
hw_breakpoint_insert(env, i); |
275 |
|
276 |
if (version_id >= 9) { |
277 |
memset(&env->interrupt_bitmap, 0, sizeof(env->interrupt_bitmap)); |
278 |
if (env->pending_irq_vmstate >= 0) { |
279 |
env->interrupt_bitmap[env->pending_irq_vmstate / 64] |=
|
280 |
(uint64_t)1 << (env->pending_irq_vmstate % 64); |
281 |
} |
282 |
} |
283 |
|
284 |
return cpu_post_load(env, version_id);
|
285 |
} |
286 |
|
287 |
int cpu_load(QEMUFile *f, void *opaque, int version_id) |
288 |
{ |
289 |
CPUState *env = opaque; |
290 |
int i, guess_mmx;
|
291 |
|
292 |
cpu_pre_load(env); |
293 |
|
294 |
if (version_id < 3 || version_id > CPU_SAVE_VERSION) |
295 |
return -EINVAL;
|
296 |
for(i = 0; i < CPU_NB_REGS; i++) |
297 |
qemu_get_betls(f, &env->regs[i]); |
298 |
qemu_get_betls(f, &env->eip); |
299 |
qemu_get_betls(f, &env->eflags); |
300 |
qemu_get_be32s(f, &env->hflags); |
301 |
|
302 |
qemu_get_be16s(f, &env->fpuc); |
303 |
qemu_get_be16s(f, &env->fpus_vmstate); |
304 |
qemu_get_be16s(f, &env->fptag_vmstate); |
305 |
qemu_get_be16s(f, &env->fpregs_format_vmstate); |
306 |
|
307 |
/* NOTE: we cannot always restore the FPU state if the image come
|
308 |
from a host with a different 'USE_X86LDOUBLE' define. We guess
|
309 |
if we are in an MMX state to restore correctly in that case. */
|
310 |
guess_mmx = ((env->fptag_vmstate == 0xff) && (env->fpus_vmstate & 0x3800) == 0); |
311 |
for(i = 0; i < 8; i++) { |
312 |
uint64_t mant; |
313 |
uint16_t exp; |
314 |
|
315 |
switch(env->fpregs_format_vmstate) {
|
316 |
case 0: |
317 |
mant = qemu_get_be64(f); |
318 |
exp = qemu_get_be16(f); |
319 |
#ifdef USE_X86LDOUBLE
|
320 |
env->fpregs[i].d = cpu_set_fp80(mant, exp); |
321 |
#else
|
322 |
/* difficult case */
|
323 |
if (guess_mmx)
|
324 |
env->fpregs[i].mmx.MMX_Q(0) = mant;
|
325 |
else
|
326 |
env->fpregs[i].d = cpu_set_fp80(mant, exp); |
327 |
#endif
|
328 |
break;
|
329 |
case 1: |
330 |
mant = qemu_get_be64(f); |
331 |
#ifdef USE_X86LDOUBLE
|
332 |
{ |
333 |
union x86_longdouble *p;
|
334 |
/* difficult case */
|
335 |
p = (void *)&env->fpregs[i];
|
336 |
if (guess_mmx) {
|
337 |
p->mant = mant; |
338 |
p->exp = 0xffff;
|
339 |
} else {
|
340 |
fp64_to_fp80(p, mant); |
341 |
} |
342 |
} |
343 |
#else
|
344 |
env->fpregs[i].mmx.MMX_Q(0) = mant;
|
345 |
#endif
|
346 |
break;
|
347 |
default:
|
348 |
return -EINVAL;
|
349 |
} |
350 |
} |
351 |
|
352 |
for(i = 0; i < 6; i++) |
353 |
cpu_get_seg(f, &env->segs[i]); |
354 |
cpu_get_seg(f, &env->ldt); |
355 |
cpu_get_seg(f, &env->tr); |
356 |
cpu_get_seg(f, &env->gdt); |
357 |
cpu_get_seg(f, &env->idt); |
358 |
|
359 |
qemu_get_be32s(f, &env->sysenter_cs); |
360 |
if (version_id >= 7) { |
361 |
qemu_get_betls(f, &env->sysenter_esp); |
362 |
qemu_get_betls(f, &env->sysenter_eip); |
363 |
} else {
|
364 |
env->sysenter_esp = qemu_get_be32(f); |
365 |
env->sysenter_eip = qemu_get_be32(f); |
366 |
} |
367 |
|
368 |
qemu_get_betls(f, &env->cr[0]);
|
369 |
qemu_get_betls(f, &env->cr[2]);
|
370 |
qemu_get_betls(f, &env->cr[3]);
|
371 |
qemu_get_betls(f, &env->cr[4]);
|
372 |
|
373 |
for(i = 0; i < 8; i++) |
374 |
qemu_get_betls(f, &env->dr[i]); |
375 |
|
376 |
qemu_get_sbe32s(f, &env->a20_mask); |
377 |
|
378 |
qemu_get_be32s(f, &env->mxcsr); |
379 |
for(i = 0; i < CPU_NB_REGS; i++) { |
380 |
cpu_get_xmm_reg(f, &env->xmm_regs[i]); |
381 |
} |
382 |
|
383 |
#ifdef TARGET_X86_64
|
384 |
qemu_get_be64s(f, &env->efer); |
385 |
qemu_get_be64s(f, &env->star); |
386 |
qemu_get_be64s(f, &env->lstar); |
387 |
qemu_get_be64s(f, &env->cstar); |
388 |
qemu_get_be64s(f, &env->fmask); |
389 |
qemu_get_be64s(f, &env->kernelgsbase); |
390 |
#endif
|
391 |
if (version_id >= 4) { |
392 |
qemu_get_be32s(f, &env->smbase); |
393 |
} |
394 |
if (version_id >= 5) { |
395 |
qemu_get_be64s(f, &env->pat); |
396 |
qemu_get_be32s(f, &env->hflags2); |
397 |
if (version_id < 6) |
398 |
qemu_get_be32s(f, &env->halted); |
399 |
|
400 |
qemu_get_be64s(f, &env->vm_hsave); |
401 |
qemu_get_be64s(f, &env->vm_vmcb); |
402 |
qemu_get_be64s(f, &env->tsc_offset); |
403 |
qemu_get_be64s(f, &env->intercept); |
404 |
qemu_get_be16s(f, &env->intercept_cr_read); |
405 |
qemu_get_be16s(f, &env->intercept_cr_write); |
406 |
qemu_get_be16s(f, &env->intercept_dr_read); |
407 |
qemu_get_be16s(f, &env->intercept_dr_write); |
408 |
qemu_get_be32s(f, &env->intercept_exceptions); |
409 |
qemu_get_8s(f, &env->v_tpr); |
410 |
} |
411 |
|
412 |
if (version_id >= 8) { |
413 |
/* MTRRs */
|
414 |
for(i = 0; i < 11; i++) |
415 |
qemu_get_be64s(f, &env->mtrr_fixed[i]); |
416 |
qemu_get_be64s(f, &env->mtrr_deftype); |
417 |
for(i = 0; i < 8; i++) { |
418 |
cpu_get_mtrr_var(f, &env->mtrr_var[i]); |
419 |
} |
420 |
} |
421 |
|
422 |
if (version_id >= 9) { |
423 |
qemu_get_sbe32s(f, &env->pending_irq_vmstate); |
424 |
qemu_get_be32s(f, &env->mp_state); |
425 |
qemu_get_be64s(f, &env->tsc); |
426 |
} |
427 |
|
428 |
if (version_id >= 10) { |
429 |
qemu_get_be64s(f, &env->mcg_cap); |
430 |
qemu_get_be64s(f, &env->mcg_status); |
431 |
qemu_get_be64s(f, &env->mcg_ctl); |
432 |
for (i = 0; i < MCE_BANKS_DEF * 4; i++) { |
433 |
qemu_get_be64s(f, &env->mce_banks[i]); |
434 |
} |
435 |
} |
436 |
|
437 |
if (version_id >= 11) { |
438 |
qemu_get_be64s(f, &env->tsc_aux); |
439 |
} |
440 |
|
441 |
tlb_flush(env, 1);
|
442 |
return 0; |
443 |
} |