/*
 * i386 virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifndef CPU_I386_H
#define CPU_I386_H

#include "config.h"

#ifdef TARGET_X86_64
#define TARGET_LONG_BITS 64
#else
#define TARGET_LONG_BITS 32
#endif

/* target supports implicit self modifying code */
#define TARGET_HAS_SMC
/* support for self modifying code even if the modified instruction is
   close to the modifying instruction */
#define TARGET_HAS_PRECISE_SMC

#define TARGET_HAS_ICE 1

#ifdef TARGET_X86_64
#define ELF_MACHINE     EM_X86_64
#else
#define ELF_MACHINE     EM_386
#endif

#include "cpu-defs.h"

#include "softfloat.h"

#define R_EAX 0
#define R_ECX 1
#define R_EDX 2
#define R_EBX 3
#define R_ESP 4
#define R_EBP 5
#define R_ESI 6
#define R_EDI 7

#define R_AL 0
#define R_CL 1
#define R_DL 2
#define R_BL 3
#define R_AH 4
#define R_CH 5
#define R_DH 6
#define R_BH 7

#define R_ES 0
#define R_CS 1
#define R_SS 2
#define R_DS 3
#define R_FS 4
#define R_GS 5

/* segment descriptor fields */
#define DESC_G_MASK     (1 << 23)
#define DESC_B_SHIFT    22
#define DESC_B_MASK     (1 << DESC_B_SHIFT)
#define DESC_L_SHIFT    21 /* x86_64 only : 64 bit code segment */
#define DESC_L_MASK     (1 << DESC_L_SHIFT)
#define DESC_AVL_MASK   (1 << 20)
#define DESC_P_MASK     (1 << 15)
#define DESC_DPL_SHIFT  13
#define DESC_DPL_MASK   (1 << DESC_DPL_SHIFT)
#define DESC_S_MASK     (1 << 12)
#define DESC_TYPE_SHIFT 8
#define DESC_A_MASK     (1 << 8)

#define DESC_CS_MASK    (1 << 11) /* 1=code segment 0=data segment */
#define DESC_C_MASK     (1 << 10) /* code: conforming */
#define DESC_R_MASK     (1 << 9)  /* code: readable */

#define DESC_E_MASK     (1 << 10) /* data: expansion direction */
#define DESC_W_MASK     (1 << 9)  /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9)

/* eflags masks */
#define CC_C    0x0001
#define CC_P    0x0004
#define CC_A    0x0010
#define CC_Z    0x0040
#define CC_S    0x0080
#define CC_O    0x0800

#define TF_SHIFT   8
#define IOPL_SHIFT 12
#define VM_SHIFT   17

#define TF_MASK   0x00000100
#define IF_MASK   0x00000200
#define DF_MASK   0x00000400
#define IOPL_MASK 0x00003000
#define NT_MASK   0x00004000
#define RF_MASK   0x00010000
#define VM_MASK   0x00020000
#define AC_MASK   0x00040000
#define VIF_MASK  0x00080000
#define VIP_MASK  0x00100000
#define ID_MASK   0x00200000

/* hidden flags - used internally by qemu to represent additional cpu
   states. Only the CPL, INHIBIT_IRQ and HALTED are not redundant. We avoid
   using the IOPL_MASK, TF_MASK and VM_MASK bit positions to ease ORing
   with eflags. */
/* current cpl */
#define HF_CPL_SHIFT         0
/* true if soft mmu is being used */
#define HF_SOFTMMU_SHIFT     2
/* true if hardware interrupts must be disabled for next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16 or 32 bit segments */
#define HF_CS32_SHIFT        4
#define HF_SS32_SHIFT        5
/* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */
#define HF_ADDSEG_SHIFT      6
/* copy of CR0.PE (protected mode) */
#define HF_PE_SHIFT          7
#define HF_TF_SHIFT          8 /* must be same as eflags */
#define HF_MP_SHIFT          9 /* the order must be MP, EM, TS */
#define HF_EM_SHIFT         10
#define HF_TS_SHIFT         11
#define HF_IOPL_SHIFT       12 /* must be same as eflags */
#define HF_LMA_SHIFT        14 /* only used on x86_64: long mode active */
#define HF_CS64_SHIFT       15 /* only used on x86_64: 64 bit code segment */
#define HF_OSFXSR_SHIFT     16 /* CR4.OSFXSR */
#define HF_VM_SHIFT         17 /* must be same as eflags */
#define HF_HALTED_SHIFT     18 /* CPU halted */
#define HF_SMM_SHIFT        19 /* CPU in SMM mode */
#define HF_GIF_SHIFT        20 /* if set CPU takes interrupts */
#define HF_HIF_SHIFT        21 /* shadow copy of IF_MASK when in SVM */

#define HF_CPL_MASK          (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK      (1 << HF_SOFTMMU_SHIFT)
#define HF_INHIBIT_IRQ_MASK  (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK         (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK         (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK       (1 << HF_ADDSEG_SHIFT)
#define HF_PE_MASK           (1 << HF_PE_SHIFT)
#define HF_TF_MASK           (1 << HF_TF_SHIFT)
#define HF_MP_MASK           (1 << HF_MP_SHIFT)
#define HF_EM_MASK           (1 << HF_EM_SHIFT)
#define HF_TS_MASK           (1 << HF_TS_SHIFT)
#define HF_LMA_MASK          (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK         (1 << HF_CS64_SHIFT)
#define HF_OSFXSR_MASK       (1 << HF_OSFXSR_SHIFT)
#define HF_HALTED_MASK       (1 << HF_HALTED_SHIFT)
#define HF_SMM_MASK          (1 << HF_SMM_SHIFT)
#define HF_GIF_MASK          (1 << HF_GIF_SHIFT)
#define HF_HIF_MASK          (1 << HF_HIF_SHIFT)

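/* Illustrative sketch (not part of the original header): the CPL occupies
   the two low hflags bits, and HF_TF_SHIFT, HF_IOPL_SHIFT and HF_VM_SHIFT
   deliberately match their eflags positions, so those bits can be combined
   with eflags by a plain OR. A hypothetical accessor for the current
   privilege level: */
static inline int cpu_x86_get_cpl_example(uint32_t hflags)
{
    return (hflags & HF_CPL_MASK) >> HF_CPL_SHIFT;
}
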
#define CR0_PE_MASK  (1 << 0)
#define CR0_MP_MASK  (1 << 1)
#define CR0_EM_MASK  (1 << 2)
#define CR0_TS_MASK  (1 << 3)
#define CR0_ET_MASK  (1 << 4)
#define CR0_NE_MASK  (1 << 5)
#define CR0_WP_MASK  (1 << 16)
#define CR0_AM_MASK  (1 << 18)
#define CR0_PG_MASK  (1 << 31)

#define CR4_VME_MASK  (1 << 0)
#define CR4_PVI_MASK  (1 << 1)
#define CR4_TSD_MASK  (1 << 2)
#define CR4_DE_MASK   (1 << 3)
#define CR4_PSE_MASK  (1 << 4)
#define CR4_PAE_MASK  (1 << 5)
#define CR4_PGE_MASK  (1 << 7)
#define CR4_PCE_MASK  (1 << 8)
#define CR4_OSFXSR_MASK (1 << 9)
#define CR4_OSXMMEXCPT_MASK  (1 << 10)

#define PG_PRESENT_BIT  0
#define PG_RW_BIT       1
#define PG_USER_BIT     2
#define PG_PWT_BIT      3
#define PG_PCD_BIT      4
#define PG_ACCESSED_BIT 5
#define PG_DIRTY_BIT    6
#define PG_PSE_BIT      7
#define PG_GLOBAL_BIT   8
#define PG_NX_BIT       63

#define PG_PRESENT_MASK  (1 << PG_PRESENT_BIT)
#define PG_RW_MASK       (1 << PG_RW_BIT)
#define PG_USER_MASK     (1 << PG_USER_BIT)
#define PG_PWT_MASK      (1 << PG_PWT_BIT)
#define PG_PCD_MASK      (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
#define PG_DIRTY_MASK    (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK      (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK   (1 << PG_GLOBAL_BIT)
#define PG_NX_MASK       (1LL << PG_NX_BIT)

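/* Illustrative sketch (not part of the original header): a page table walk
   tests PTE permission bits by combining the masks above; a hypothetical
   check for a present, writable, user-accessible entry: */
static inline int pte_user_writable_example(uint64_t pte)
{
    const uint64_t needed = PG_PRESENT_MASK | PG_RW_MASK | PG_USER_MASK;
    return (pte & needed) == needed;
}
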
#define PG_ERROR_W_BIT     1

#define PG_ERROR_P_MASK    0x01
#define PG_ERROR_W_MASK    (1 << PG_ERROR_W_BIT)
#define PG_ERROR_U_MASK    0x04
#define PG_ERROR_RSVD_MASK 0x08
#define PG_ERROR_I_D_MASK  0x10

#define MSR_IA32_APICBASE               0x1b
#define MSR_IA32_APICBASE_BSP           (1<<8)
#define MSR_IA32_APICBASE_ENABLE        (1<<11)
#define MSR_IA32_APICBASE_BASE          (0xfffff<<12)

#define MSR_IA32_SYSENTER_CS            0x174
#define MSR_IA32_SYSENTER_ESP           0x175
#define MSR_IA32_SYSENTER_EIP           0x176

#define MSR_MCG_CAP                     0x179
#define MSR_MCG_STATUS                  0x17a
#define MSR_MCG_CTL                     0x17b

#define MSR_PAT                         0x277

#define MSR_EFER                        0xc0000080

#define MSR_EFER_SCE   (1 << 0)
#define MSR_EFER_LME   (1 << 8)
#define MSR_EFER_LMA   (1 << 10)
#define MSR_EFER_NXE   (1 << 11)
#define MSR_EFER_FFXSR (1 << 14)

#define MSR_STAR                        0xc0000081
#define MSR_LSTAR                       0xc0000082
#define MSR_CSTAR                       0xc0000083
#define MSR_FMASK                       0xc0000084
#define MSR_FSBASE                      0xc0000100
#define MSR_GSBASE                      0xc0000101
#define MSR_KERNELGSBASE                0xc0000102

#define MSR_VM_HSAVE_PA                 0xc0010117

/* cpuid_features bits */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
#define CPUID_PAT  (1 << 16)
#define CPUID_PSE36 (1 << 17)
#define CPUID_PN   (1 << 18)
#define CPUID_CLFLUSH (1 << 19)
#define CPUID_DTS  (1 << 21)
#define CPUID_ACPI (1 << 22)
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)
#define CPUID_SS   (1 << 27)
#define CPUID_HT   (1 << 28)
#define CPUID_TM   (1 << 29)
#define CPUID_IA64 (1 << 30)
#define CPUID_PBE  (1 << 31)

#define CPUID_EXT_SSE3    (1 << 0)
#define CPUID_EXT_MONITOR (1 << 3)
#define CPUID_EXT_DSCPL   (1 << 4)
#define CPUID_EXT_VMX     (1 << 5)
#define CPUID_EXT_SMX     (1 << 6)
#define CPUID_EXT_EST     (1 << 7)
#define CPUID_EXT_TM2     (1 << 8)
#define CPUID_EXT_SSSE3   (1 << 9)
#define CPUID_EXT_CID     (1 << 10)
#define CPUID_EXT_CX16    (1 << 13)
#define CPUID_EXT_XTPR    (1 << 14)
#define CPUID_EXT_DCA     (1 << 17)
#define CPUID_EXT_POPCNT  (1 << 22)

#define CPUID_EXT2_SYSCALL (1 << 11)
#define CPUID_EXT2_MP      (1 << 19)
#define CPUID_EXT2_NX      (1 << 20)
#define CPUID_EXT2_MMXEXT  (1 << 22)
#define CPUID_EXT2_FFXSR   (1 << 25)
#define CPUID_EXT2_PDPE1GB (1 << 26)
#define CPUID_EXT2_RDTSCP  (1 << 27)
#define CPUID_EXT2_LM      (1 << 29)
#define CPUID_EXT2_3DNOWEXT (1 << 30)
#define CPUID_EXT2_3DNOW   (1 << 31)

#define CPUID_EXT3_LAHF_LM (1 << 0)
#define CPUID_EXT3_CMP_LEG (1 << 1)
#define CPUID_EXT3_SVM     (1 << 2)
#define CPUID_EXT3_EXTAPIC (1 << 3)
#define CPUID_EXT3_CR8LEG  (1 << 4)
#define CPUID_EXT3_ABM     (1 << 5)
#define CPUID_EXT3_SSE4A   (1 << 6)
#define CPUID_EXT3_MISALIGNSSE (1 << 7)
#define CPUID_EXT3_3DNOWPREFETCH (1 << 8)
#define CPUID_EXT3_OSVW    (1 << 9)
#define CPUID_EXT3_IBS     (1 << 10)

#define EXCP00_DIVZ  0
#define EXCP01_SSTP  1
#define EXCP02_NMI   2
#define EXCP03_INT3  3
#define EXCP04_INTO  4
#define EXCP05_BOUND 5
#define EXCP06_ILLOP 6
#define EXCP07_PREX  7
#define EXCP08_DBLE  8
#define EXCP09_XERR  9
#define EXCP0A_TSS   10
#define EXCP0B_NOSEG 11
#define EXCP0C_STACK 12
#define EXCP0D_GPF   13
#define EXCP0E_PAGE  14
#define EXCP10_COPR  16
#define EXCP11_ALGN  17
#define EXCP12_MCHK  18

enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */

    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
    CC_OP_MULW,
    CC_OP_MULL,
    CC_OP_MULQ,

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,
    CC_OP_ADDQ,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,
    CC_OP_ADCQ,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,
    CC_OP_SUBQ,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,
    CC_OP_SBBQ,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,
    CC_OP_LOGICQ,

    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,
    CC_OP_INCQ,

    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,
    CC_OP_DECQ,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,
    CC_OP_SHLQ,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,
    CC_OP_SARQ,

    CC_OP_NB,
};

#ifdef FLOATX80
#define USE_X86LDOUBLE
#endif

#ifdef USE_X86LDOUBLE
typedef floatx80 CPU86_LDouble;
#else
typedef float64 CPU86_LDouble;
#endif

typedef struct SegmentCache {
    uint32_t selector;
    target_ulong base;
    uint32_t limit;
    uint32_t flags;
} SegmentCache;

typedef union {
    uint8_t _b[16];
    uint16_t _w[8];
    uint32_t _l[4];
    uint64_t _q[2];
    float32 _s[4];
    float64 _d[2];
} XMMReg;

typedef union {
    uint8_t _b[8];
    uint16_t _w[4]; /* sized so MMX_W() can index all four elements */
    uint32_t _l[2]; /* sized so MMX_L() can index both elements */
    uint64_t q;
} MMXReg;

#ifdef WORDS_BIGENDIAN
#define XMM_B(n) _b[15 - (n)]
#define XMM_W(n) _w[7 - (n)]
#define XMM_L(n) _l[3 - (n)]
#define XMM_S(n) _s[3 - (n)]
#define XMM_Q(n) _q[1 - (n)]
#define XMM_D(n) _d[1 - (n)]

#define MMX_B(n) _b[7 - (n)]
#define MMX_W(n) _w[3 - (n)]
#define MMX_L(n) _l[1 - (n)]
#else
#define XMM_B(n) _b[n]
#define XMM_W(n) _w[n]
#define XMM_L(n) _l[n]
#define XMM_S(n) _s[n]
#define XMM_Q(n) _q[n]
#define XMM_D(n) _d[n]

#define MMX_B(n) _b[n]
#define MMX_W(n) _w[n]
#define MMX_L(n) _l[n]
#endif
#define MMX_Q(n) q

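/* Illustrative sketch (not part of the original header): the accessor
   macros hide host byte order, so XMM_L(0) always names the guest's low
   32 bits whether or not WORDS_BIGENDIAN is defined. A hypothetical helper
   reading the low dword of an XMM register: */
static inline uint32_t xmm_low_dword_example(const XMMReg *r)
{
    return r->XMM_L(0);
}
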
#ifdef TARGET_X86_64
#define CPU_NB_REGS 16
#else
#define CPU_NB_REGS 8
#endif

#define NB_MMU_MODES 2

typedef struct CPUX86State {
#if TARGET_LONG_BITS > HOST_LONG_BITS
    /* temporaries if we cannot store them in host registers */
    target_ulong t0, t1, t2;
#endif

    /* standard registers */
    target_ulong regs[CPU_NB_REGS];
    target_ulong eip;
    target_ulong eflags; /* eflags register. During CPU emulation, CC
                            flags and DF are set to zero because they are
                            stored elsewhere */

    /* emulator internal eflags handling */
    target_ulong cc_src;
    target_ulong cc_dst;
    uint32_t cc_op;
    int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
    uint32_t hflags; /* hidden flags, see HF_xxx constants */

    /* segments */
    SegmentCache segs[6]; /* selector values */
    SegmentCache ldt;
    SegmentCache tr;
    SegmentCache gdt; /* only base and limit are used */
    SegmentCache idt; /* only base and limit are used */

    target_ulong cr[5]; /* NOTE: cr1 is unused */
    uint32_t a20_mask;

    /* FPU state */
    unsigned int fpstt; /* top of stack index */
    unsigned int fpus;
    unsigned int fpuc;
    uint8_t fptags[8];  /* 0 = valid, 1 = empty */
    union {
#ifdef USE_X86LDOUBLE
        CPU86_LDouble d __attribute__((aligned(16)));
#else
        CPU86_LDouble d;
#endif
        MMXReg mmx;
    } fpregs[8];

    /* emulator internal variables */
    float_status fp_status;
    CPU86_LDouble ft0;
    union {
        float f;
        double d;
        int i32;
        int64_t i64;
    } fp_convert;

    float_status sse_status;
    uint32_t mxcsr;
    XMMReg xmm_regs[CPU_NB_REGS];
    XMMReg xmm_t0;
    MMXReg mmx_t0;

    /* sysenter registers */
    uint32_t sysenter_cs;
    uint32_t sysenter_esp;
    uint32_t sysenter_eip;
    uint64_t efer;
    uint64_t star;

    target_phys_addr_t vm_hsave;
    target_phys_addr_t vm_vmcb;
    uint64_t intercept;
    uint16_t intercept_cr_read;
    uint16_t intercept_cr_write;
    uint16_t intercept_dr_read;
    uint16_t intercept_dr_write;
    uint32_t intercept_exceptions;

#ifdef TARGET_X86_64
    target_ulong lstar;
    target_ulong cstar;
    target_ulong fmask;
    target_ulong kernelgsbase;
#endif

    uint64_t pat;

    /* exception/interrupt handling */
    jmp_buf jmp_env;
    int exception_index;
    int error_code;
    int exception_is_int;
    target_ulong exception_next_eip;
    target_ulong dr[8]; /* debug registers */
    uint32_t smbase;
    int interrupt_request;
    int user_mode_only; /* user mode only simulation */
    int old_exception;  /* exception in flight */

    CPU_COMMON

    /* processor features (e.g. for CPUID insn) */
    uint32_t cpuid_level;
    uint32_t cpuid_vendor1;
    uint32_t cpuid_vendor2;
    uint32_t cpuid_vendor3;
    uint32_t cpuid_version;
    uint32_t cpuid_features;
    uint32_t cpuid_ext_features;
    uint32_t cpuid_xlevel;
    uint32_t cpuid_model[12];
    uint32_t cpuid_ext2_features;
    uint32_t cpuid_ext3_features;
    uint32_t cpuid_apic_id;

#ifdef USE_KQEMU
    int kqemu_enabled;
    int last_io_time;
#endif
    /* in order to simplify APIC support, we leave this pointer to the
       user */
    struct APICState *apic_state;
} CPUX86State;

CPUX86State *cpu_x86_init(const char *cpu_model);
int cpu_x86_exec(CPUX86State *s);
void cpu_x86_close(CPUX86State *s);
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt,
                                               ...));
int cpu_get_pic_interrupt(CPUX86State *s);
/* MSDOS compatibility mode FPU exception support */
void cpu_set_ferr(CPUX86State *s);

/* this function must always be used to load data in the segment
   cache: it synchronizes the hflags with the segment cache values */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
                                          int seg_reg, unsigned int selector,
                                          target_ulong base,
                                          unsigned int limit,
                                          unsigned int flags)
{
    SegmentCache *sc;
    unsigned int new_hflags;

    sc = &env->segs[seg_reg];
    sc->selector = selector;
    sc->base = base;
    sc->limit = limit;
    sc->flags = flags;

    /* update the hidden flags */
    {
        if (seg_reg == R_CS) {
#ifdef TARGET_X86_64
            if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
                /* long mode */
                env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
                env->hflags &= ~(HF_ADDSEG_MASK);
            } else
#endif
            {
                /* legacy / compatibility case */
                new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                    >> (DESC_B_SHIFT - HF_CS32_SHIFT);
                env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
                    new_hflags;
            }
        }
        new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
            >> (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (env->hflags & HF_CS64_MASK) {
            /* zero base assumed for DS, ES and SS in long mode */
        } else if (!(env->cr[0] & CR0_PE_MASK) ||
                   (env->eflags & VM_MASK) ||
                   !(env->hflags & HF_CS32_MASK)) {
            /* XXX: try to avoid this test. The problem comes from the
               fact that in real mode or vm86 mode we only modify the
               'base' and 'selector' fields of the segment cache to go
               faster. A solution may be to force addseg to one in
               translate-i386.c. */
            new_hflags |= HF_ADDSEG_MASK;
        } else {
            new_hflags |= ((env->segs[R_DS].base |
                            env->segs[R_ES].base |
                            env->segs[R_SS].base) != 0) <<
                HF_ADDSEG_SHIFT;
        }
        env->hflags = (env->hflags &
                       ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
    }
}

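/* Usage sketch (illustrative, not part of the original header): loading a
   flat 32 bit code segment the way protected mode setup code would; the
   selector value 0x08 is hypothetical: */
static inline void cpu_x86_load_flat_cs_example(CPUX86State *env)
{
    cpu_x86_load_seg_cache(env, R_CS, 0x08, 0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK |
                           DESC_A_MASK);
}
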
/* wrapper, just in case memory mappings must be changed */
static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
{
#if HF_CPL_MASK == 3
    s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
#else
#error HF_CPL_MASK is hardcoded
#endif
}

/* used for debug or cpu save/restore */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f);
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper);

/* the following helpers are only usable in user mode simulation as
   they can trigger unexpected exceptions */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);

/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. Non-zero
   is returned if the signal was handled by the virtual CPU. */
int cpu_x86_signal_handler(int host_signum, void *pinfo,
                           void *puc);
void cpu_x86_set_a20(CPUX86State *env, int a20_state);

uint64_t cpu_get_tsc(CPUX86State *env);

void cpu_set_apic_base(CPUX86State *env, uint64_t val);
uint64_t cpu_get_apic_base(CPUX86State *env);
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val);
#ifndef NO_CPU_IO_DEFS
uint8_t cpu_get_apic_tpr(CPUX86State *env);
#endif
void cpu_smm_update(CPUX86State *env);

/* will be suppressed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);

/* used to debug */
#define X86_DUMP_FPU  0x0001 /* dump FPU state too */
#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */

#ifdef USE_KQEMU
static inline int cpu_get_time_fast(void)
{
    int low, high;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    return low;
}
#endif

#define TARGET_PAGE_BITS 12

#define CPUState CPUX86State
#define cpu_init cpu_x86_init
#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler
#define cpu_list x86_cpu_list

/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
static inline int cpu_mmu_index (CPUState *env)
{
    return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
}

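/* Illustrative sketch (not part of the original header): CPL 3 selects the
   _user MMU mode and CPL 0-2 the _kernel mode, so a hypothetical user mode
   test reduces to comparing against MMU_USER_IDX: */
static inline int cpu_is_user_mode_example(CPUState *env)
{
    return cpu_mmu_index(env) == MMU_USER_IDX;
}
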
#include "cpu-all.h"

#include "svm.h"

#endif /* CPU_I386_H */