target-i386/cpu.h @ 79c4f6b0
/*
 * i386 virtual CPU header
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#ifndef CPU_I386_H
#define CPU_I386_H

#include "config.h"

#ifdef TARGET_X86_64
#define TARGET_LONG_BITS 64
#else
#define TARGET_LONG_BITS 32
#endif

/* target supports implicit self modifying code */
#define TARGET_HAS_SMC
/* support for self modifying code even if the modified instruction is
   close to the modifying instruction */
#define TARGET_HAS_PRECISE_SMC

#define TARGET_HAS_ICE 1

#ifdef TARGET_X86_64
#define ELF_MACHINE     EM_X86_64
#else
#define ELF_MACHINE     EM_386
#endif

#define CPUState struct CPUX86State

#include "cpu-defs.h"

#include "softfloat.h"

#define R_EAX 0
#define R_ECX 1
#define R_EDX 2
#define R_EBX 3
#define R_ESP 4
#define R_EBP 5
#define R_ESI 6
#define R_EDI 7

#define R_AL 0
#define R_CL 1
#define R_DL 2
#define R_BL 3
#define R_AH 4
#define R_CH 5
#define R_DH 6
#define R_BH 7

#define R_ES 0
#define R_CS 1
#define R_SS 2
#define R_DS 3
#define R_FS 4
#define R_GS 5

/* segment descriptor fields */
#define DESC_G_MASK     (1 << 23)
#define DESC_B_SHIFT    22
#define DESC_B_MASK     (1 << DESC_B_SHIFT)
#define DESC_L_SHIFT    21 /* x86_64 only : 64 bit code segment */
#define DESC_L_MASK     (1 << DESC_L_SHIFT)
#define DESC_AVL_MASK   (1 << 20)
#define DESC_P_MASK     (1 << 15)
#define DESC_DPL_SHIFT  13
#define DESC_DPL_MASK   (3 << DESC_DPL_SHIFT)
#define DESC_S_MASK     (1 << 12)
#define DESC_TYPE_SHIFT 8
#define DESC_TYPE_MASK  (15 << DESC_TYPE_SHIFT)
#define DESC_A_MASK     (1 << 8)

#define DESC_CS_MASK    (1 << 11) /* 1=code segment 0=data segment */
#define DESC_C_MASK     (1 << 10) /* code: conforming */
#define DESC_R_MASK     (1 << 9)  /* code: readable */

#define DESC_E_MASK     (1 << 10) /* data: expansion direction */
#define DESC_W_MASK     (1 << 9)  /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9)
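
/* Illustrative sketch (not part of the original header): how the DESC_*
   masks above decode the 'flags' word of a cached segment descriptor.
   The helper names are hypothetical. */
static inline int example_desc_dpl(uint32_t flags)
{
    return (flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT; /* privilege level 0..3 */
}

static inline int example_desc_is_code(uint32_t flags)
{
    /* S=1 selects a code/data descriptor; the CS bit then picks code vs data */
    return (flags & DESC_S_MASK) && (flags & DESC_CS_MASK);
}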

/* eflags masks */
#define CC_C    0x0001
#define CC_P    0x0004
#define CC_A    0x0010
#define CC_Z    0x0040
#define CC_S    0x0080
#define CC_O    0x0800

#define TF_SHIFT   8
#define IOPL_SHIFT 12
#define VM_SHIFT   17

#define TF_MASK                 0x00000100
#define IF_MASK                 0x00000200
#define DF_MASK                 0x00000400
#define IOPL_MASK               0x00003000
#define NT_MASK                 0x00004000
#define RF_MASK                 0x00010000
#define VM_MASK                 0x00020000
#define AC_MASK                 0x00040000
#define VIF_MASK                0x00080000
#define VIP_MASK                0x00100000
#define ID_MASK                 0x00200000

/* hidden flags - used internally by qemu to represent additional cpu
   states. Only the CPL, INHIBIT_IRQ, SMM and SVMI are not
   redundant. We avoid using the IOPL_MASK, TF_MASK and VM_MASK bit
   position to ease oring with eflags. */
/* current cpl */
#define HF_CPL_SHIFT         0
/* true if soft mmu is being used */
#define HF_SOFTMMU_SHIFT     2
/* true if hardware interrupts must be disabled for next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16 or 32 segments */
#define HF_CS32_SHIFT        4
#define HF_SS32_SHIFT        5
/* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */
#define HF_ADDSEG_SHIFT      6
/* copy of CR0.PE (protected mode) */
#define HF_PE_SHIFT          7
#define HF_TF_SHIFT          8 /* must be same as eflags */
#define HF_MP_SHIFT          9 /* the order must be MP, EM, TS */
#define HF_EM_SHIFT         10
#define HF_TS_SHIFT         11
#define HF_IOPL_SHIFT       12 /* must be same as eflags */
#define HF_LMA_SHIFT        14 /* only used on x86_64: long mode active */
#define HF_CS64_SHIFT       15 /* only used on x86_64: 64 bit code segment */
#define HF_RF_SHIFT         16 /* must be same as eflags */
#define HF_VM_SHIFT         17 /* must be same as eflags */
#define HF_SMM_SHIFT        19 /* CPU in SMM mode */
#define HF_SVME_SHIFT       20 /* SVME enabled (copy of EFER.SVME) */
#define HF_SVMI_SHIFT       21 /* SVM intercepts are active */
#define HF_OSFXSR_SHIFT     22 /* CR4.OSFXSR */

#define HF_CPL_MASK          (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK      (1 << HF_SOFTMMU_SHIFT)
#define HF_INHIBIT_IRQ_MASK  (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK         (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK         (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK       (1 << HF_ADDSEG_SHIFT)
#define HF_PE_MASK           (1 << HF_PE_SHIFT)
#define HF_TF_MASK           (1 << HF_TF_SHIFT)
#define HF_MP_MASK           (1 << HF_MP_SHIFT)
#define HF_EM_MASK           (1 << HF_EM_SHIFT)
#define HF_TS_MASK           (1 << HF_TS_SHIFT)
#define HF_IOPL_MASK         (3 << HF_IOPL_SHIFT)
#define HF_LMA_MASK          (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK         (1 << HF_CS64_SHIFT)
#define HF_RF_MASK           (1 << HF_RF_SHIFT)
#define HF_VM_MASK           (1 << HF_VM_SHIFT)
#define HF_SMM_MASK          (1 << HF_SMM_SHIFT)
#define HF_SVME_MASK         (1 << HF_SVME_SHIFT)
#define HF_SVMI_MASK         (1 << HF_SVMI_SHIFT)
#define HF_OSFXSR_MASK       (1 << HF_OSFXSR_SHIFT)

/* hflags2 */

#define HF2_GIF_SHIFT        0 /* if set CPU takes interrupts */
#define HF2_HIF_SHIFT        1 /* value of IF_MASK when entering SVM */
#define HF2_NMI_SHIFT        2 /* CPU serving NMI */
#define HF2_VINTR_SHIFT      3 /* value of V_INTR_MASKING bit */

#define HF2_GIF_MASK         (1 << HF2_GIF_SHIFT)
#define HF2_HIF_MASK         (1 << HF2_HIF_SHIFT)
#define HF2_NMI_MASK         (1 << HF2_NMI_SHIFT)
#define HF2_VINTR_MASK       (1 << HF2_VINTR_SHIFT)
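
/* Illustrative sketch (not part of the original header): reading a few of
   the hidden flags defined above from a raw hflags word. The helper names
   are hypothetical. */
static inline int example_hflags_cpl(uint32_t hflags)
{
    return (hflags & HF_CPL_MASK) >> HF_CPL_SHIFT; /* current privilege level */
}

static inline int example_hflags_code64(uint32_t hflags)
{
    /* 64 bit code requires long mode active and a 64 bit code segment */
    return (hflags & HF_LMA_MASK) && (hflags & HF_CS64_MASK);
}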

#define CR0_PE_SHIFT 0
#define CR0_MP_SHIFT 1

#define CR0_PE_MASK  (1 << 0)
#define CR0_MP_MASK  (1 << 1)
#define CR0_EM_MASK  (1 << 2)
#define CR0_TS_MASK  (1 << 3)
#define CR0_ET_MASK  (1 << 4)
#define CR0_NE_MASK  (1 << 5)
#define CR0_WP_MASK  (1 << 16)
#define CR0_AM_MASK  (1 << 18)
#define CR0_PG_MASK  (1 << 31)

#define CR4_VME_MASK  (1 << 0)
#define CR4_PVI_MASK  (1 << 1)
#define CR4_TSD_MASK  (1 << 2)
#define CR4_DE_MASK   (1 << 3)
#define CR4_PSE_MASK  (1 << 4)
#define CR4_PAE_MASK  (1 << 5)
#define CR4_MCE_MASK  (1 << 6)
#define CR4_PGE_MASK  (1 << 7)
#define CR4_PCE_MASK  (1 << 8)
#define CR4_OSFXSR_SHIFT 9
#define CR4_OSFXSR_MASK (1 << CR4_OSFXSR_SHIFT)
#define CR4_OSXMMEXCPT_MASK  (1 << 10)

#define DR6_BD          (1 << 13)
#define DR6_BS          (1 << 14)
#define DR6_BT          (1 << 15)
#define DR6_FIXED_1     0xffff0ff0

#define DR7_GD          (1 << 13)
#define DR7_TYPE_SHIFT  16
#define DR7_LEN_SHIFT   18
#define DR7_FIXED_1     0x00000400

#define PG_PRESENT_BIT  0
#define PG_RW_BIT       1
#define PG_USER_BIT     2
#define PG_PWT_BIT      3
#define PG_PCD_BIT      4
#define PG_ACCESSED_BIT 5
#define PG_DIRTY_BIT    6
#define PG_PSE_BIT      7
#define PG_GLOBAL_BIT   8
#define PG_NX_BIT       63

#define PG_PRESENT_MASK  (1 << PG_PRESENT_BIT)
#define PG_RW_MASK       (1 << PG_RW_BIT)
#define PG_USER_MASK     (1 << PG_USER_BIT)
#define PG_PWT_MASK      (1 << PG_PWT_BIT)
#define PG_PCD_MASK      (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
#define PG_DIRTY_MASK    (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK      (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK   (1 << PG_GLOBAL_BIT)
#define PG_NX_MASK       (1LL << PG_NX_BIT)

#define PG_ERROR_W_BIT     1

#define PG_ERROR_P_MASK    0x01
#define PG_ERROR_W_MASK    (1 << PG_ERROR_W_BIT)
#define PG_ERROR_U_MASK    0x04
#define PG_ERROR_RSVD_MASK 0x08
#define PG_ERROR_I_D_MASK  0x10
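
/* Illustrative sketch (not part of the original header): checking whether a
   32 bit page table entry permits a user-mode write with the PG_* masks
   above, and composing the matching page fault error code from the
   PG_ERROR_* masks. The helper names are hypothetical. */
static inline int example_pte_allows_user_write(uint32_t pte)
{
    return (pte & PG_PRESENT_MASK) &&
           (pte & PG_RW_MASK) &&
           (pte & PG_USER_MASK);
}

static inline int example_page_fault_error_code(int present, int is_write,
                                                int is_user)
{
    return (present ? PG_ERROR_P_MASK : 0) |
           (is_write ? PG_ERROR_W_MASK : 0) |
           (is_user ? PG_ERROR_U_MASK : 0);
}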

#define MCG_CTL_P       (1UL<<8)   /* MCG_CTL register available */

#define MCE_CAP_DEF     MCG_CTL_P
#define MCE_BANKS_DEF   10

#define MCG_STATUS_MCIP (1UL<<2)   /* machine check in progress */

#define MCI_STATUS_VAL  (1UL<<63)  /* valid error */
#define MCI_STATUS_OVER (1UL<<62)  /* previous errors lost */
#define MCI_STATUS_UC   (1UL<<61)  /* uncorrected error */

#define MSR_IA32_TSC                    0x10
#define MSR_IA32_APICBASE               0x1b
#define MSR_IA32_APICBASE_BSP           (1<<8)
#define MSR_IA32_APICBASE_ENABLE        (1<<11)
#define MSR_IA32_APICBASE_BASE          (0xfffff<<12)

#define MSR_MTRRcap                     0xfe
#define MSR_MTRRcap_VCNT                8
#define MSR_MTRRcap_FIXRANGE_SUPPORT    (1 << 8)
#define MSR_MTRRcap_WC_SUPPORTED        (1 << 10)

#define MSR_IA32_SYSENTER_CS            0x174
#define MSR_IA32_SYSENTER_ESP           0x175
#define MSR_IA32_SYSENTER_EIP           0x176

#define MSR_MCG_CAP                     0x179
#define MSR_MCG_STATUS                  0x17a
#define MSR_MCG_CTL                     0x17b

#define MSR_IA32_PERF_STATUS            0x198

#define MSR_MTRRphysBase(reg)           (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg)           (0x200 + 2 * (reg) + 1)

#define MSR_MTRRfix64K_00000            0x250
#define MSR_MTRRfix16K_80000            0x258
#define MSR_MTRRfix16K_A0000            0x259
#define MSR_MTRRfix4K_C0000             0x268
#define MSR_MTRRfix4K_C8000             0x269
#define MSR_MTRRfix4K_D0000             0x26a
#define MSR_MTRRfix4K_D8000             0x26b
#define MSR_MTRRfix4K_E0000             0x26c
#define MSR_MTRRfix4K_E8000             0x26d
#define MSR_MTRRfix4K_F0000             0x26e
#define MSR_MTRRfix4K_F8000             0x26f

#define MSR_PAT                         0x277

#define MSR_MTRRdefType                 0x2ff

#define MSR_MC0_CTL                     0x400
#define MSR_MC0_STATUS                  0x401
#define MSR_MC0_ADDR                    0x402
#define MSR_MC0_MISC                    0x403

#define MSR_EFER                        0xc0000080

#define MSR_EFER_SCE   (1 << 0)
#define MSR_EFER_LME   (1 << 8)
#define MSR_EFER_LMA   (1 << 10)
#define MSR_EFER_NXE   (1 << 11)
#define MSR_EFER_SVME  (1 << 12)
#define MSR_EFER_FFXSR (1 << 14)

#define MSR_STAR                        0xc0000081
#define MSR_LSTAR                       0xc0000082
#define MSR_CSTAR                       0xc0000083
#define MSR_FMASK                       0xc0000084
#define MSR_FSBASE                      0xc0000100
#define MSR_GSBASE                      0xc0000101
#define MSR_KERNELGSBASE                0xc0000102

#define MSR_VM_HSAVE_PA                 0xc0010117
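
/* Illustrative sketch (not part of the original header): the variable-range
   MTRRs come in base/mask MSR pairs, so MSR_MTRRphysBase()/MSR_MTRRphysMask()
   simply map a register index to consecutive MSR numbers, e.g. index 3 gives
   0x206/0x207. The helper name is hypothetical. */
static inline int example_is_var_mtrr_msr(uint32_t msr)
{
    return msr >= MSR_MTRRphysBase(0) &&
           msr <= MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1);
}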

/* cpuid_features bits */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
#define CPUID_PAT  (1 << 16)
#define CPUID_PSE36   (1 << 17)
#define CPUID_PN   (1 << 18)
#define CPUID_CLFLUSH (1 << 19)
#define CPUID_DTS (1 << 21)
#define CPUID_ACPI (1 << 22)
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)
#define CPUID_SS (1 << 27)
#define CPUID_HT (1 << 28)
#define CPUID_TM (1 << 29)
#define CPUID_IA64 (1 << 30)
#define CPUID_PBE (1 << 31)

#define CPUID_EXT_SSE3     (1 << 0)
#define CPUID_EXT_DTES64   (1 << 2)
#define CPUID_EXT_MONITOR  (1 << 3)
#define CPUID_EXT_DSCPL    (1 << 4)
#define CPUID_EXT_VMX      (1 << 5)
#define CPUID_EXT_SMX      (1 << 6)
#define CPUID_EXT_EST      (1 << 7)
#define CPUID_EXT_TM2      (1 << 8)
#define CPUID_EXT_SSSE3    (1 << 9)
#define CPUID_EXT_CID      (1 << 10)
#define CPUID_EXT_CX16     (1 << 13)
#define CPUID_EXT_XTPR     (1 << 14)
#define CPUID_EXT_PDCM     (1 << 15)
#define CPUID_EXT_DCA      (1 << 18)
#define CPUID_EXT_SSE41    (1 << 19)
#define CPUID_EXT_SSE42    (1 << 20)
#define CPUID_EXT_X2APIC   (1 << 21)
#define CPUID_EXT_MOVBE    (1 << 22)
#define CPUID_EXT_POPCNT   (1 << 23)
#define CPUID_EXT_XSAVE    (1 << 26)
#define CPUID_EXT_OSXSAVE  (1 << 27)
#define CPUID_EXT_HYPERVISOR  (1 << 31)

#define CPUID_EXT2_SYSCALL (1 << 11)
#define CPUID_EXT2_MP      (1 << 19)
#define CPUID_EXT2_NX      (1 << 20)
#define CPUID_EXT2_MMXEXT  (1 << 22)
#define CPUID_EXT2_FFXSR   (1 << 25)
#define CPUID_EXT2_PDPE1GB (1 << 26)
#define CPUID_EXT2_RDTSCP  (1 << 27)
#define CPUID_EXT2_LM      (1 << 29)
#define CPUID_EXT2_3DNOWEXT (1 << 30)
#define CPUID_EXT2_3DNOW   (1 << 31)

#define CPUID_EXT3_LAHF_LM (1 << 0)
#define CPUID_EXT3_CMP_LEG (1 << 1)
#define CPUID_EXT3_SVM     (1 << 2)
#define CPUID_EXT3_EXTAPIC (1 << 3)
#define CPUID_EXT3_CR8LEG  (1 << 4)
#define CPUID_EXT3_ABM     (1 << 5)
#define CPUID_EXT3_SSE4A   (1 << 6)
#define CPUID_EXT3_MISALIGNSSE (1 << 7)
#define CPUID_EXT3_3DNOWPREFETCH (1 << 8)
#define CPUID_EXT3_OSVW    (1 << 9)
#define CPUID_EXT3_IBS     (1 << 10)
#define CPUID_EXT3_SKINIT  (1 << 12)

#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */

#define CPUID_VENDOR_AMD_1   0x68747541 /* "Auth" */
#define CPUID_VENDOR_AMD_2   0x69746e65 /* "enti" */
#define CPUID_VENDOR_AMD_3   0x444d4163 /* "cAMD" */
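
/* Illustrative sketch (not part of the original header): CPUID leaf 0
   returns the vendor string packed little-endian into EBX, EDX, ECX, which
   is exactly how the three constants above are laid out ("Genu" + "ineI" +
   "ntel" = "GenuineIntel"). The helper name is hypothetical. */
static inline void example_vendor_string(uint32_t ebx, uint32_t edx,
                                         uint32_t ecx, char dst[13])
{
    int i;
    for (i = 0; i < 4; i++) {
        dst[i]     = (ebx >> (8 * i)) & 0xff;
        dst[i + 4] = (edx >> (8 * i)) & 0xff;
        dst[i + 8] = (ecx >> (8 * i)) & 0xff;
    }
    dst[12] = '\0';
}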

#define CPUID_MWAIT_IBE     (1 << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX     (1 << 0) /* enumeration supported */

#define EXCP00_DIVZ     0
#define EXCP01_DB       1
#define EXCP02_NMI      2
#define EXCP03_INT3     3
#define EXCP04_INTO     4
#define EXCP05_BOUND    5
#define EXCP06_ILLOP    6
#define EXCP07_PREX     7
#define EXCP08_DBLE     8
#define EXCP09_XERR     9
#define EXCP0A_TSS      10
#define EXCP0B_NOSEG    11
#define EXCP0C_STACK    12
#define EXCP0D_GPF      13
#define EXCP0E_PAGE     14
#define EXCP10_COPR     16
#define EXCP11_ALGN     17
#define EXCP12_MCHK     18

#define EXCP_SYSCALL    0x100 /* only happens in user only emulation
                                 for syscall instruction */

enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */

    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
    CC_OP_MULW,
    CC_OP_MULL,
    CC_OP_MULQ,

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,
    CC_OP_ADDQ,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,
    CC_OP_ADCQ,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,
    CC_OP_SUBQ,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,
    CC_OP_SBBQ,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,
    CC_OP_LOGICQ,

    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,
    CC_OP_INCQ,

    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,
    CC_OP_DECQ,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,
    CC_OP_SHLQ,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,
    CC_OP_SARQ,

    CC_OP_NB,
};
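
/* Illustrative sketch (not part of the original header): the CC_OP_* values
   above drive lazy condition-code evaluation; instead of updating EFLAGS on
   every instruction, the translator records the last operation in cc_op and
   its operands in CC_SRC/CC_DST, and flags are reconstructed on demand.
   Roughly what a compute helper for CC_OP_ADDB does (names hypothetical;
   the real helpers live in op_helper.c): */
static inline int example_compute_c_addb(target_ulong cc_src, target_ulong cc_dst)
{
    /* For ADD: CC_DST = result, CC_SRC = src1, so a carry occurred when the
       8 bit result wrapped below the first operand. */
    uint8_t src1 = cc_src;
    uint8_t res = cc_dst;
    return res < src1 ? CC_C : 0;
}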

#ifdef FLOATX80
#define USE_X86LDOUBLE
#endif

#ifdef USE_X86LDOUBLE
typedef floatx80 CPU86_LDouble;
#else
typedef float64 CPU86_LDouble;
#endif

typedef struct SegmentCache {
    uint32_t selector;
    target_ulong base;
    uint32_t limit;
    uint32_t flags;
} SegmentCache;

typedef union {
    uint8_t _b[16];
    uint16_t _w[8];
    uint32_t _l[4];
    uint64_t _q[2];
    float32 _s[4];
    float64 _d[2];
} XMMReg;

typedef union {
    uint8_t _b[8];
    uint16_t _w[4];
    uint32_t _l[2];
    float32 _s[2];
    uint64_t q;
} MMXReg;

#ifdef WORDS_BIGENDIAN
#define XMM_B(n) _b[15 - (n)]
#define XMM_W(n) _w[7 - (n)]
#define XMM_L(n) _l[3 - (n)]
#define XMM_S(n) _s[3 - (n)]
#define XMM_Q(n) _q[1 - (n)]
#define XMM_D(n) _d[1 - (n)]

#define MMX_B(n) _b[7 - (n)]
#define MMX_W(n) _w[3 - (n)]
#define MMX_L(n) _l[1 - (n)]
#define MMX_S(n) _s[1 - (n)]
#else
#define XMM_B(n) _b[n]
#define XMM_W(n) _w[n]
#define XMM_L(n) _l[n]
#define XMM_S(n) _s[n]
#define XMM_Q(n) _q[n]
#define XMM_D(n) _d[n]

#define MMX_B(n) _b[n]
#define MMX_W(n) _w[n]
#define MMX_L(n) _l[n]
#define MMX_S(n) _s[n]
#endif
#define MMX_Q(n) q
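
/* Illustrative sketch (not part of the original header): the XMM_*()/MMX_*()
   macros hide host byte order, so element 0 always means the architecturally
   lowest lane on both little and big endian hosts. The helper name is
   hypothetical. */
static inline uint32_t example_xmm_lane32(const XMMReg *reg, int lane)
{
    return reg->XMM_L(lane); /* expands to _l[lane] or _l[3 - lane] */
}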

#ifdef TARGET_X86_64
#define CPU_NB_REGS 16
#else
#define CPU_NB_REGS 8
#endif

#define NB_MMU_MODES 2

typedef struct CPUX86State {
    /* standard registers */
    target_ulong regs[CPU_NB_REGS];
    target_ulong eip;
    target_ulong eflags; /* eflags register. During CPU emulation, CC
                        flags and DF are set to zero because they are
                        stored elsewhere */

    /* emulator internal eflags handling */
    target_ulong cc_src;
    target_ulong cc_dst;
    uint32_t cc_op;
    int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
    uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
                        are known at translation time. */
    uint32_t hflags2; /* various other flags, see HF2_xxx constants. */

    /* segments */
    SegmentCache segs[6]; /* selector values */
    SegmentCache ldt;
    SegmentCache tr;
    SegmentCache gdt; /* only base and limit are used */
    SegmentCache idt; /* only base and limit are used */

    target_ulong cr[5]; /* NOTE: cr1 is unused */
    uint64_t a20_mask;

    /* FPU state */
    unsigned int fpstt; /* top of stack index */
    unsigned int fpus;
    unsigned int fpuc;
    uint8_t fptags[8];   /* 0 = valid, 1 = empty */
    union {
#ifdef USE_X86LDOUBLE
        CPU86_LDouble d __attribute__((aligned(16)));
#else
        CPU86_LDouble d;
#endif
        MMXReg mmx;
    } fpregs[8];

    /* emulator internal variables */
    float_status fp_status;
    CPU86_LDouble ft0;

    float_status mmx_status; /* for 3DNow! float ops */
    float_status sse_status;
    uint32_t mxcsr;
    XMMReg xmm_regs[CPU_NB_REGS];
    XMMReg xmm_t0;
    MMXReg mmx_t0;
    target_ulong cc_tmp; /* temporary for rcr/rcl */

    /* sysenter registers */
    uint32_t sysenter_cs;
    target_ulong sysenter_esp;
    target_ulong sysenter_eip;
    uint64_t efer;
    uint64_t star;

    uint64_t vm_hsave;
    uint64_t vm_vmcb;
    uint64_t tsc_offset;
    uint64_t intercept;
    uint16_t intercept_cr_read;
    uint16_t intercept_cr_write;
    uint16_t intercept_dr_read;
    uint16_t intercept_dr_write;
    uint32_t intercept_exceptions;
    uint8_t v_tpr;

#ifdef TARGET_X86_64
    target_ulong lstar;
    target_ulong cstar;
    target_ulong fmask;
    target_ulong kernelgsbase;
#endif

    uint64_t tsc;

    uint64_t pat;

    /* exception/interrupt handling */
    int error_code;
    int exception_is_int;
    target_ulong exception_next_eip;
    target_ulong dr[8]; /* debug registers */
    union {
        CPUBreakpoint *cpu_breakpoint[4];
        CPUWatchpoint *cpu_watchpoint[4];
    }; /* break/watchpoints for dr[0..3] */
    uint32_t smbase;
    int old_exception;  /* exception in flight */

    CPU_COMMON

    /* processor features (e.g. for CPUID insn) */
    uint32_t cpuid_level;
    uint32_t cpuid_vendor1;
    uint32_t cpuid_vendor2;
    uint32_t cpuid_vendor3;
    uint32_t cpuid_version;
    uint32_t cpuid_features;
    uint32_t cpuid_ext_features;
    uint32_t cpuid_xlevel;
    uint32_t cpuid_model[12];
    uint32_t cpuid_ext2_features;
    uint32_t cpuid_ext3_features;
    uint32_t cpuid_apic_id;
    int cpuid_vendor_override;

    /* MTRRs */
    uint64_t mtrr_fixed[11];
    uint64_t mtrr_deftype;
    struct {
        uint64_t base;
        uint64_t mask;
    } mtrr_var[8];

#ifdef CONFIG_KQEMU
    int kqemu_enabled;
    int last_io_time;
#endif

    /* For KVM */
    uint64_t interrupt_bitmap[256 / 64];
    uint32_t mp_state;

    /* in order to simplify APIC support, we leave this pointer to the
       user */
    struct APICState *apic_state;

    uint64 mcg_cap;
    uint64 mcg_status;
    uint64 mcg_ctl;
    uint64 *mce_banks;
} CPUX86State;

CPUX86State *cpu_x86_init(const char *cpu_model);
int cpu_x86_exec(CPUX86State *s);
void cpu_x86_close(CPUX86State *s);
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt,
                                                 ...));
int cpu_get_pic_interrupt(CPUX86State *s);
/* MSDOS compatibility mode FPU exception support */
void cpu_set_ferr(CPUX86State *s);

/* this function must always be used to load data in the segment
   cache: it synchronizes the hflags with the segment cache values */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
                                          int seg_reg, unsigned int selector,
                                          target_ulong base,
                                          unsigned int limit,
                                          unsigned int flags)
{
    SegmentCache *sc;
    unsigned int new_hflags;

    sc = &env->segs[seg_reg];
    sc->selector = selector;
    sc->base = base;
    sc->limit = limit;
    sc->flags = flags;

    /* update the hidden flags */
    {
        if (seg_reg == R_CS) {
#ifdef TARGET_X86_64
            if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
                /* long mode */
                env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
                env->hflags &= ~(HF_ADDSEG_MASK);
            } else
#endif
            {
                /* legacy / compatibility case */
                new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                    >> (DESC_B_SHIFT - HF_CS32_SHIFT);
                env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
                    new_hflags;
            }
        }
        new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
            >> (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (env->hflags & HF_CS64_MASK) {
            /* zero base assumed for DS, ES and SS in long mode */
        } else if (!(env->cr[0] & CR0_PE_MASK) ||
                   (env->eflags & VM_MASK) ||
                   !(env->hflags & HF_CS32_MASK)) {
            /* XXX: try to avoid this test. The problem comes from the
               fact that in real mode or vm86 mode we only modify the
               'base' and 'selector' fields of the segment cache to go
               faster. A solution may be to force addseg to one in
               translate-i386.c. */
            new_hflags |= HF_ADDSEG_MASK;
        } else {
            new_hflags |= ((env->segs[R_DS].base |
                            env->segs[R_ES].base |
                            env->segs[R_SS].base) != 0) <<
                HF_ADDSEG_SHIFT;
        }
        env->hflags = (env->hflags &
                       ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
    }
}
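
/* Illustrative sketch (not part of the original header): a typical call to
   cpu_x86_load_seg_cache(), loading DS with a flat 4 GB writable data
   segment the way machine setup code might. The flag bits come from the
   DESC_* masks defined earlier; the helper name is hypothetical. */
static inline void example_load_flat_ds(CPUX86State *env, unsigned int selector)
{
    cpu_x86_load_seg_cache(env, R_DS, selector, 0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | DESC_W_MASK | DESC_A_MASK);
}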

/* wrapper, just in case memory mappings must be changed */
static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
{
#if HF_CPL_MASK == 3
    s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
#else
#error HF_CPL_MASK is hardcoded
#endif
}

/* op_helper.c */
/* used for debug or cpu save/restore */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f);
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper);

/* cpu-exec.c */
/* the following helpers are only usable in user mode simulation as
   they can trigger unexpected exceptions */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);

/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. non zero
   is returned if the signal was handled by the virtual CPU.  */
int cpu_x86_signal_handler(int host_signum, void *pinfo,
                           void *puc);

/* helper.c */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu);
void cpu_x86_set_a20(CPUX86State *env, int a20_state);
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx);

/* DR7 holds 2 enable bits per breakpoint in its low byte and a 4 bit
   control field (2 type bits + 2 length bits) per breakpoint starting at
   bit 16, hence the different strides below. */
static inline int hw_breakpoint_enabled(unsigned long dr7, int index)
{
    return (dr7 >> (index * 2)) & 3;
}

static inline int hw_breakpoint_type(unsigned long dr7, int index)
{
    return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
}

static inline int hw_breakpoint_len(unsigned long dr7, int index)
{
    int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3);
    return (len == 2) ? 8 : len + 1;
}
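
/* Illustrative sketch (not part of the original header): decoding the DR7
   control word for one debug register with the helpers above. The function
   name is hypothetical. */
static inline int example_bp0_is_8_byte_data_write(unsigned long dr7)
{
    return hw_breakpoint_enabled(dr7, 0) &&
           hw_breakpoint_type(dr7, 0) == 1 &&  /* 01b = break on data writes */
           hw_breakpoint_len(dr7, 0) == 8;
}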

void hw_breakpoint_insert(CPUX86State *env, int index);
void hw_breakpoint_remove(CPUX86State *env, int index);
int check_hw_breakpoints(CPUX86State *env, int force_dr6_update);

/* will be suppressed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);

/* hw/apic.c */
void cpu_set_apic_base(CPUX86State *env, uint64_t val);
uint64_t cpu_get_apic_base(CPUX86State *env);
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val);
#ifndef NO_CPU_IO_DEFS
uint8_t cpu_get_apic_tpr(CPUX86State *env);
#endif

/* hw/pc.c */
void cpu_smm_update(CPUX86State *env);
uint64_t cpu_get_tsc(CPUX86State *env);

/* used to debug */
#define X86_DUMP_FPU  0x0001 /* dump FPU state too */
#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */

#ifdef CONFIG_KQEMU
static inline int cpu_get_time_fast(void)
{
    int low, high;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    return low;
}
#endif

#define TARGET_PAGE_BITS 12

#define cpu_init cpu_x86_init
#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler
#define cpu_list x86_cpu_list

#define CPU_SAVE_VERSION 10

/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
static inline int cpu_mmu_index (CPUState *env)
{
    return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
}

/* translate.c */
void optimize_flags_init(void);

typedef struct CCTable {
    int (*compute_all)(void); /* return all the flags */
    int (*compute_c)(void);  /* return the C flag */
} CCTable;

#if defined(CONFIG_USER_ONLY)
static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
{
    if (newsp)
        env->regs[R_ESP] = newsp;
    env->regs[R_EAX] = 0;
}
#endif

#include "cpu-all.h"
#include "exec-all.h"

#include "svm.h"

static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb)
{
    env->eip = tb->pc - tb->cs_base;
}

static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
                                        target_ulong *cs_base, int *flags)
{
    *cs_base = env->segs[R_CS].base;
    *pc = *cs_base + env->eip;
    *flags = env->hflags |
        (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK));
}

void apic_init_reset(CPUState *env);
void apic_sipi(CPUState *env);
void do_cpu_init(CPUState *env);
void do_cpu_sipi(CPUState *env);
#endif /* CPU_I386_H */