target-i386 / cpu.h @ a88790a1

/*
 * i386 virtual CPU header
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_I386_H
#define CPU_I386_H

#include "config.h"

#ifdef TARGET_X86_64
#define TARGET_LONG_BITS 64
#else
#define TARGET_LONG_BITS 32
#endif

/* target supports implicit self modifying code */
#define TARGET_HAS_SMC
/* support for self modifying code even if the modified instruction is
   close to the modifying instruction */
#define TARGET_HAS_PRECISE_SMC

#define TARGET_HAS_ICE 1

#ifdef TARGET_X86_64
#define ELF_MACHINE     EM_X86_64
#else
#define ELF_MACHINE     EM_386
#endif

#define CPUState struct CPUX86State

#include "cpu-defs.h"

#include "softfloat.h"

#define R_EAX 0
#define R_ECX 1
#define R_EDX 2
#define R_EBX 3
#define R_ESP 4
#define R_EBP 5
#define R_ESI 6
#define R_EDI 7

#define R_AL 0
#define R_CL 1
#define R_DL 2
#define R_BL 3
#define R_AH 4
#define R_CH 5
#define R_DH 6
#define R_BH 7

#define R_ES 0
#define R_CS 1
#define R_SS 2
#define R_DS 3
#define R_FS 4
#define R_GS 5

/* segment descriptor fields */
#define DESC_G_MASK     (1 << 23)
#define DESC_B_SHIFT    22
#define DESC_B_MASK     (1 << DESC_B_SHIFT)
#define DESC_L_SHIFT    21 /* x86_64 only : 64 bit code segment */
#define DESC_L_MASK     (1 << DESC_L_SHIFT)
#define DESC_AVL_MASK   (1 << 20)
#define DESC_P_MASK     (1 << 15)
#define DESC_DPL_SHIFT  13
#define DESC_DPL_MASK   (3 << DESC_DPL_SHIFT)
#define DESC_S_MASK     (1 << 12)
#define DESC_TYPE_SHIFT 8
#define DESC_TYPE_MASK  (15 << DESC_TYPE_SHIFT)
#define DESC_A_MASK     (1 << 8)

#define DESC_CS_MASK    (1 << 11) /* 1=code segment 0=data segment */
#define DESC_C_MASK     (1 << 10) /* code: conforming */
#define DESC_R_MASK     (1 << 9)  /* code: readable */

#define DESC_E_MASK     (1 << 10) /* data: expansion direction */
#define DESC_W_MASK     (1 << 9)  /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9)
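
/* Example: the DPL field occupies bits 13-14 of the cached descriptor
   flags, so it can be recovered with the mask/shift pair defined above
   (illustrative helper, name hypothetical). */
static inline int desc_dpl_example(uint32_t flags)
{
    return (flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT;
}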

/* eflags masks */
#define CC_C    0x0001
#define CC_P    0x0004
#define CC_A    0x0010
#define CC_Z    0x0040
#define CC_S    0x0080
#define CC_O    0x0800

#define TF_SHIFT   8
#define IOPL_SHIFT 12
#define VM_SHIFT   17

#define TF_MASK                 0x00000100
#define IF_MASK                 0x00000200
#define DF_MASK                 0x00000400
#define IOPL_MASK               0x00003000
#define NT_MASK                 0x00004000
#define RF_MASK                 0x00010000
#define VM_MASK                 0x00020000
#define AC_MASK                 0x00040000
#define VIF_MASK                0x00080000
#define VIP_MASK                0x00100000
#define ID_MASK                 0x00200000
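
/* Example: the IOPL field is two bits wide, so it is read with a shift
   and a mask of 3 (illustrative helper, name hypothetical). */
static inline int eflags_iopl_example(uint32_t eflags)
{
    return (eflags >> IOPL_SHIFT) & 3;
}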

/* hidden flags - used internally by qemu to represent additional cpu
   states. Only the CPL, INHIBIT_IRQ, SMM and SVMI are not
   redundant. We avoid using the IOPL_MASK, TF_MASK and VM_MASK bit
   positions to ease OR'ing with eflags. */
/* current cpl */
#define HF_CPL_SHIFT         0
/* true if soft mmu is being used */
#define HF_SOFTMMU_SHIFT     2
/* true if hardware interrupts must be disabled for next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16-bit or 32-bit code/stack segment */
#define HF_CS32_SHIFT        4
#define HF_SS32_SHIFT        5
/* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */
#define HF_ADDSEG_SHIFT      6
/* copy of CR0.PE (protected mode) */
#define HF_PE_SHIFT          7
#define HF_TF_SHIFT          8 /* must be same as eflags */
#define HF_MP_SHIFT          9 /* the order must be MP, EM, TS */
#define HF_EM_SHIFT         10
#define HF_TS_SHIFT         11
#define HF_IOPL_SHIFT       12 /* must be same as eflags */
#define HF_LMA_SHIFT        14 /* only used on x86_64: long mode active */
#define HF_CS64_SHIFT       15 /* only used on x86_64: 64 bit code segment */
#define HF_RF_SHIFT         16 /* must be same as eflags */
#define HF_VM_SHIFT         17 /* must be same as eflags */
#define HF_SMM_SHIFT        19 /* CPU in SMM mode */
#define HF_SVME_SHIFT       20 /* SVME enabled (copy of EFER.SVME) */
#define HF_SVMI_SHIFT       21 /* SVM intercepts are active */
#define HF_OSFXSR_SHIFT     22 /* CR4.OSFXSR */

#define HF_CPL_MASK          (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK      (1 << HF_SOFTMMU_SHIFT)
#define HF_INHIBIT_IRQ_MASK  (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK         (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK         (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK       (1 << HF_ADDSEG_SHIFT)
#define HF_PE_MASK           (1 << HF_PE_SHIFT)
#define HF_TF_MASK           (1 << HF_TF_SHIFT)
#define HF_MP_MASK           (1 << HF_MP_SHIFT)
#define HF_EM_MASK           (1 << HF_EM_SHIFT)
#define HF_TS_MASK           (1 << HF_TS_SHIFT)
#define HF_IOPL_MASK         (3 << HF_IOPL_SHIFT)
#define HF_LMA_MASK          (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK         (1 << HF_CS64_SHIFT)
#define HF_RF_MASK           (1 << HF_RF_SHIFT)
#define HF_VM_MASK           (1 << HF_VM_SHIFT)
#define HF_SMM_MASK          (1 << HF_SMM_SHIFT)
#define HF_SVME_MASK         (1 << HF_SVME_SHIFT)
#define HF_SVMI_MASK         (1 << HF_SVMI_SHIFT)
#define HF_OSFXSR_MASK       (1 << HF_OSFXSR_SHIFT)
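
/* Example: the current privilege level is the only multi-bit field in
   hflags; everything else is a single-bit test (illustrative helper,
   name hypothetical). */
static inline int hflags_cpl_example(uint32_t hflags)
{
    return (hflags & HF_CPL_MASK) >> HF_CPL_SHIFT;
}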

/* hflags2 */

#define HF2_GIF_SHIFT        0 /* if set CPU takes interrupts */
#define HF2_HIF_SHIFT        1 /* value of IF_MASK when entering SVM */
#define HF2_NMI_SHIFT        2 /* CPU serving NMI */
#define HF2_VINTR_SHIFT      3 /* value of V_INTR_MASKING bit */

#define HF2_GIF_MASK          (1 << HF2_GIF_SHIFT)
#define HF2_HIF_MASK          (1 << HF2_HIF_SHIFT)
#define HF2_NMI_MASK          (1 << HF2_NMI_SHIFT)
#define HF2_VINTR_MASK        (1 << HF2_VINTR_SHIFT)

#define CR0_PE_SHIFT 0
#define CR0_MP_SHIFT 1

#define CR0_PE_MASK  (1 << 0)
#define CR0_MP_MASK  (1 << 1)
#define CR0_EM_MASK  (1 << 2)
#define CR0_TS_MASK  (1 << 3)
#define CR0_ET_MASK  (1 << 4)
#define CR0_NE_MASK  (1 << 5)
#define CR0_WP_MASK  (1 << 16)
#define CR0_AM_MASK  (1 << 18)
#define CR0_PG_MASK  (1 << 31)

#define CR4_VME_MASK  (1 << 0)
#define CR4_PVI_MASK  (1 << 1)
#define CR4_TSD_MASK  (1 << 2)
#define CR4_DE_MASK   (1 << 3)
#define CR4_PSE_MASK  (1 << 4)
#define CR4_PAE_MASK  (1 << 5)
#define CR4_MCE_MASK  (1 << 6)
#define CR4_PGE_MASK  (1 << 7)
#define CR4_PCE_MASK  (1 << 8)
#define CR4_OSFXSR_SHIFT 9
#define CR4_OSFXSR_MASK (1 << CR4_OSFXSR_SHIFT)
#define CR4_OSXMMEXCPT_MASK  (1 << 10)

#define DR6_BD          (1 << 13)
#define DR6_BS          (1 << 14)
#define DR6_BT          (1 << 15)
#define DR6_FIXED_1     0xffff0ff0

#define DR7_GD          (1 << 13)
#define DR7_TYPE_SHIFT  16
#define DR7_LEN_SHIFT   18
#define DR7_FIXED_1     0x00000400

#define PG_PRESENT_BIT  0
#define PG_RW_BIT       1
#define PG_USER_BIT     2
#define PG_PWT_BIT      3
#define PG_PCD_BIT      4
#define PG_ACCESSED_BIT 5
#define PG_DIRTY_BIT    6
#define PG_PSE_BIT      7
#define PG_GLOBAL_BIT   8
#define PG_NX_BIT       63

#define PG_PRESENT_MASK  (1 << PG_PRESENT_BIT)
#define PG_RW_MASK       (1 << PG_RW_BIT)
#define PG_USER_MASK     (1 << PG_USER_BIT)
#define PG_PWT_MASK      (1 << PG_PWT_BIT)
#define PG_PCD_MASK      (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
#define PG_DIRTY_MASK    (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK      (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK   (1 << PG_GLOBAL_BIT)
#define PG_NX_MASK       (1LL << PG_NX_BIT)

#define PG_ERROR_W_BIT     1

#define PG_ERROR_P_MASK    0x01
#define PG_ERROR_W_MASK    (1 << PG_ERROR_W_BIT)
#define PG_ERROR_U_MASK    0x04
#define PG_ERROR_RSVD_MASK 0x08
#define PG_ERROR_I_D_MASK  0x10
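
/* Example: a page-fault error code is built by OR'ing the masks above;
   a faulting user-mode write to a present page pushes P|W|U
   (illustrative helper, name hypothetical). */
static inline int pf_error_code_example(void)
{
    return PG_ERROR_P_MASK | PG_ERROR_W_MASK | PG_ERROR_U_MASK;
}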

#define MCG_CTL_P       (1UL<<8)   /* MCG_CTL register available */

#define MCE_CAP_DEF     MCG_CTL_P
#define MCE_BANKS_DEF   10

#define MCG_STATUS_MCIP (1ULL<<2)   /* machine check in progress */

#define MCI_STATUS_VAL  (1ULL<<63)  /* valid error */
#define MCI_STATUS_OVER (1ULL<<62)  /* previous errors lost */
#define MCI_STATUS_UC   (1ULL<<61)  /* uncorrected error */

#define MSR_IA32_TSC                    0x10
#define MSR_IA32_APICBASE               0x1b
#define MSR_IA32_APICBASE_BSP           (1<<8)
#define MSR_IA32_APICBASE_ENABLE        (1<<11)
#define MSR_IA32_APICBASE_BASE          (0xfffff<<12)

#define MSR_MTRRcap                     0xfe
#define MSR_MTRRcap_VCNT                8
#define MSR_MTRRcap_FIXRANGE_SUPPORT    (1 << 8)
#define MSR_MTRRcap_WC_SUPPORTED        (1 << 10)

#define MSR_IA32_SYSENTER_CS            0x174
#define MSR_IA32_SYSENTER_ESP           0x175
#define MSR_IA32_SYSENTER_EIP           0x176

#define MSR_MCG_CAP                     0x179
#define MSR_MCG_STATUS                  0x17a
#define MSR_MCG_CTL                     0x17b

#define MSR_IA32_PERF_STATUS            0x198

#define MSR_MTRRphysBase(reg)           (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg)           (0x200 + 2 * (reg) + 1)
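
/* Note: the variable-range MTRR MSRs are interleaved base/mask pairs,
   so MSR_MTRRphysBase(1) expands to 0x202 and MSR_MTRRphysMask(1)
   to 0x203. */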

#define MSR_MTRRfix64K_00000            0x250
#define MSR_MTRRfix16K_80000            0x258
#define MSR_MTRRfix16K_A0000            0x259
#define MSR_MTRRfix4K_C0000             0x268
#define MSR_MTRRfix4K_C8000             0x269
#define MSR_MTRRfix4K_D0000             0x26a
#define MSR_MTRRfix4K_D8000             0x26b
#define MSR_MTRRfix4K_E0000             0x26c
#define MSR_MTRRfix4K_E8000             0x26d
#define MSR_MTRRfix4K_F0000             0x26e
#define MSR_MTRRfix4K_F8000             0x26f

#define MSR_PAT                         0x277

#define MSR_MTRRdefType                 0x2ff

#define MSR_MC0_CTL                     0x400
#define MSR_MC0_STATUS                  0x401
#define MSR_MC0_ADDR                    0x402
#define MSR_MC0_MISC                    0x403

#define MSR_EFER                        0xc0000080

#define MSR_EFER_SCE   (1 << 0)
#define MSR_EFER_LME   (1 << 8)
#define MSR_EFER_LMA   (1 << 10)
#define MSR_EFER_NXE   (1 << 11)
#define MSR_EFER_SVME  (1 << 12)
#define MSR_EFER_FFXSR (1 << 14)

#define MSR_STAR                        0xc0000081
#define MSR_LSTAR                       0xc0000082
#define MSR_CSTAR                       0xc0000083
#define MSR_FMASK                       0xc0000084
#define MSR_FSBASE                      0xc0000100
#define MSR_GSBASE                      0xc0000101
#define MSR_KERNELGSBASE                0xc0000102
#define MSR_TSC_AUX                     0xc0000103

#define MSR_VM_HSAVE_PA                 0xc0010117

/* cpuid_features bits */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
#define CPUID_PAT  (1 << 16)
#define CPUID_PSE36   (1 << 17)
#define CPUID_PN   (1 << 18)
#define CPUID_CLFLUSH (1 << 19)
#define CPUID_DTS (1 << 21)
#define CPUID_ACPI (1 << 22)
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)
#define CPUID_SS (1 << 27)
#define CPUID_HT (1 << 28)
#define CPUID_TM (1 << 29)
#define CPUID_IA64 (1 << 30)
#define CPUID_PBE (1 << 31)

#define CPUID_EXT_SSE3     (1 << 0)
#define CPUID_EXT_DTES64   (1 << 2)
#define CPUID_EXT_MONITOR  (1 << 3)
#define CPUID_EXT_DSCPL    (1 << 4)
#define CPUID_EXT_VMX      (1 << 5)
#define CPUID_EXT_SMX      (1 << 6)
#define CPUID_EXT_EST      (1 << 7)
#define CPUID_EXT_TM2      (1 << 8)
#define CPUID_EXT_SSSE3    (1 << 9)
#define CPUID_EXT_CID      (1 << 10)
#define CPUID_EXT_CX16     (1 << 13)
#define CPUID_EXT_XTPR     (1 << 14)
#define CPUID_EXT_PDCM     (1 << 15)
#define CPUID_EXT_DCA      (1 << 18)
#define CPUID_EXT_SSE41    (1 << 19)
#define CPUID_EXT_SSE42    (1 << 20)
#define CPUID_EXT_X2APIC   (1 << 21)
#define CPUID_EXT_MOVBE    (1 << 22)
#define CPUID_EXT_POPCNT   (1 << 23)
#define CPUID_EXT_XSAVE    (1 << 26)
#define CPUID_EXT_OSXSAVE  (1 << 27)
#define CPUID_EXT_HYPERVISOR  (1 << 31)

#define CPUID_EXT2_SYSCALL (1 << 11)
#define CPUID_EXT2_MP      (1 << 19)
#define CPUID_EXT2_NX      (1 << 20)
#define CPUID_EXT2_MMXEXT  (1 << 22)
#define CPUID_EXT2_FFXSR   (1 << 25)
#define CPUID_EXT2_PDPE1GB (1 << 26)
#define CPUID_EXT2_RDTSCP  (1 << 27)
#define CPUID_EXT2_LM      (1 << 29)
#define CPUID_EXT2_3DNOWEXT (1 << 30)
#define CPUID_EXT2_3DNOW   (1 << 31)

#define CPUID_EXT3_LAHF_LM (1 << 0)
#define CPUID_EXT3_CMP_LEG (1 << 1)
#define CPUID_EXT3_SVM     (1 << 2)
#define CPUID_EXT3_EXTAPIC (1 << 3)
#define CPUID_EXT3_CR8LEG  (1 << 4)
#define CPUID_EXT3_ABM     (1 << 5)
#define CPUID_EXT3_SSE4A   (1 << 6)
#define CPUID_EXT3_MISALIGNSSE (1 << 7)
#define CPUID_EXT3_3DNOWPREFETCH (1 << 8)
#define CPUID_EXT3_OSVW    (1 << 9)
#define CPUID_EXT3_IBS     (1 << 10)
#define CPUID_EXT3_SKINIT  (1 << 12)

#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */

#define CPUID_VENDOR_AMD_1   0x68747541 /* "Auth" */
#define CPUID_VENDOR_AMD_2   0x69746e65 /* "enti" */
#define CPUID_VENDOR_AMD_3   0x444d4163 /* "cAMD" */
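
/* Note: these are the EBX/EDX/ECX words returned by CPUID leaf 0;
   stored little-endian they spell out "GenuineIntel" and
   "AuthenticAMD" respectively. */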

#define CPUID_MWAIT_IBE     (1 << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX     (1 << 0) /* enumeration supported */

#define EXCP00_DIVZ     0
#define EXCP01_DB       1
#define EXCP02_NMI      2
#define EXCP03_INT3     3
#define EXCP04_INTO     4
#define EXCP05_BOUND    5
#define EXCP06_ILLOP    6
#define EXCP07_PREX     7
#define EXCP08_DBLE     8
#define EXCP09_XERR     9
#define EXCP0A_TSS      10
#define EXCP0B_NOSEG    11
#define EXCP0C_STACK    12
#define EXCP0D_GPF      13
#define EXCP0E_PAGE     14
#define EXCP10_COPR     16
#define EXCP11_ALGN     17
#define EXCP12_MCHK     18

#define EXCP_SYSCALL    0x100 /* only happens in user-mode emulation
                                 for the syscall instruction */

enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */

    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
    CC_OP_MULW,
    CC_OP_MULL,
    CC_OP_MULQ,

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,
    CC_OP_ADDQ,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,
    CC_OP_ADCQ,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,
    CC_OP_SUBQ,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,
    CC_OP_SBBQ,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,
    CC_OP_LOGICQ,

    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,
    CC_OP_INCQ,

    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,
    CC_OP_DECQ,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,
    CC_OP_SHLQ,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,
    CC_OP_SARQ,

    CC_OP_NB,
};
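
/* Example: with the lazy scheme above, each cc_op selects how the flags
   are reconstructed from CC_SRC/CC_DST on demand. For an 8-bit ADD
   (CC_SRC = src1, CC_DST = result), carry is set iff the result wrapped
   below the first operand (illustrative helper, name hypothetical). */
static inline int cc_compute_c_addb_example(target_ulong cc_dst,
                                            target_ulong cc_src)
{
    return (uint8_t)cc_dst < (uint8_t)cc_src ? CC_C : 0;
}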

#ifdef FLOATX80
#define USE_X86LDOUBLE
#endif

#ifdef USE_X86LDOUBLE
typedef floatx80 CPU86_LDouble;
#else
typedef float64 CPU86_LDouble;
#endif

typedef struct SegmentCache {
    uint32_t selector;
    target_ulong base;
    uint32_t limit;
    uint32_t flags;
} SegmentCache;

typedef union {
    uint8_t _b[16];
    uint16_t _w[8];
    uint32_t _l[4];
    uint64_t _q[2];
    float32 _s[4];
    float64 _d[2];
} XMMReg;

typedef union {
    uint8_t _b[8];
    uint16_t _w[4];
    uint32_t _l[2];
    float32 _s[2];
    uint64_t q;
} MMXReg;

#ifdef HOST_WORDS_BIGENDIAN
#define XMM_B(n) _b[15 - (n)]
#define XMM_W(n) _w[7 - (n)]
#define XMM_L(n) _l[3 - (n)]
#define XMM_S(n) _s[3 - (n)]
#define XMM_Q(n) _q[1 - (n)]
#define XMM_D(n) _d[1 - (n)]

#define MMX_B(n) _b[7 - (n)]
#define MMX_W(n) _w[3 - (n)]
#define MMX_L(n) _l[1 - (n)]
#define MMX_S(n) _s[1 - (n)]
#else
#define XMM_B(n) _b[n]
#define XMM_W(n) _w[n]
#define XMM_L(n) _l[n]
#define XMM_S(n) _s[n]
#define XMM_Q(n) _q[n]
#define XMM_D(n) _d[n]

#define MMX_B(n) _b[n]
#define MMX_W(n) _w[n]
#define MMX_L(n) _l[n]
#define MMX_S(n) _s[n]
#endif
#define MMX_Q(n) q
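
/* Note: these accessors hide host byte order: on a big-endian host the
   element order inside the unions is reversed (XMM_B(0) maps to
   _b[15]), so XMM_B(n)/MMX_B(n) always name the same guest-visible
   element regardless of the host. */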

typedef union {
#ifdef USE_X86LDOUBLE
    CPU86_LDouble d __attribute__((aligned(16)));
#else
    CPU86_LDouble d;
#endif
    MMXReg mmx;
} FPReg;

typedef struct {
    uint64_t base;
    uint64_t mask;
} MTRRVar;

#define CPU_NB_REGS64 16
#define CPU_NB_REGS32 8

#ifdef TARGET_X86_64
#define CPU_NB_REGS CPU_NB_REGS64
#else
#define CPU_NB_REGS CPU_NB_REGS32
#endif

#define NB_MMU_MODES 2

typedef struct CPUX86State {
    /* standard registers */
    target_ulong regs[CPU_NB_REGS];
    target_ulong eip;
    target_ulong eflags; /* eflags register. During CPU emulation, CC
                        flags and DF are set to zero because they are
                        stored elsewhere */

    /* emulator internal eflags handling */
    target_ulong cc_src;
    target_ulong cc_dst;
    uint32_t cc_op;
    int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
    uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
                        are known at translation time. */
    uint32_t hflags2; /* various other flags, see HF2_xxx constants. */

    /* segments */
    SegmentCache segs[6]; /* selector values */
    SegmentCache ldt;
    SegmentCache tr;
    SegmentCache gdt; /* only base and limit are used */
    SegmentCache idt; /* only base and limit are used */

    target_ulong cr[5]; /* NOTE: cr1 is unused */
    int32_t a20_mask;

    /* FPU state */
    unsigned int fpstt; /* top of stack index */
    uint16_t fpus;
    uint16_t fpuc;
    uint8_t fptags[8];   /* 0 = valid, 1 = empty */
    FPReg fpregs[8];

    /* emulator internal variables */
    float_status fp_status;
    CPU86_LDouble ft0;

    float_status mmx_status; /* for 3DNow! float ops */
    float_status sse_status;
    uint32_t mxcsr;
    XMMReg xmm_regs[CPU_NB_REGS];
    XMMReg xmm_t0;
    MMXReg mmx_t0;
    target_ulong cc_tmp; /* temporary for rcr/rcl */

    /* sysenter registers */
    uint32_t sysenter_cs;
    target_ulong sysenter_esp;
    target_ulong sysenter_eip;
    uint64_t efer;
    uint64_t star;

    uint64_t vm_hsave;
    uint64_t vm_vmcb;
    uint64_t tsc_offset;
    uint64_t intercept;
    uint16_t intercept_cr_read;
    uint16_t intercept_cr_write;
    uint16_t intercept_dr_read;
    uint16_t intercept_dr_write;
    uint32_t intercept_exceptions;
    uint8_t v_tpr;

#ifdef TARGET_X86_64
    target_ulong lstar;
    target_ulong cstar;
    target_ulong fmask;
    target_ulong kernelgsbase;
#endif
    uint64_t system_time_msr;
    uint64_t wall_clock_msr;

    uint64_t tsc;

    uint64_t pat;

    /* exception/interrupt handling */
    int error_code;
    int exception_is_int;
    target_ulong exception_next_eip;
    target_ulong dr[8]; /* debug registers */
    union {
        CPUBreakpoint *cpu_breakpoint[4];
        CPUWatchpoint *cpu_watchpoint[4];
    }; /* break/watchpoints for dr[0..3] */
    uint32_t smbase;
    int old_exception;  /* exception in flight */

    CPU_COMMON

    /* processor features (e.g. for CPUID insn) */
    uint32_t cpuid_level;
    uint32_t cpuid_vendor1;
    uint32_t cpuid_vendor2;
    uint32_t cpuid_vendor3;
    uint32_t cpuid_version;
    uint32_t cpuid_features;
    uint32_t cpuid_ext_features;
    uint32_t cpuid_xlevel;
    uint32_t cpuid_model[12];
    uint32_t cpuid_ext2_features;
    uint32_t cpuid_ext3_features;
    uint32_t cpuid_apic_id;
    int cpuid_vendor_override;

    /* MTRRs */
    uint64_t mtrr_fixed[11];
    uint64_t mtrr_deftype;
    MTRRVar mtrr_var[8];

    /* For KVM */
    uint32_t mp_state;
    int32_t exception_injected;
    int32_t interrupt_injected;
    uint8_t soft_interrupt;
    uint8_t nmi_injected;
    uint8_t nmi_pending;
    uint8_t has_error_code;
    uint32_t sipi_vector;
    uint32_t cpuid_kvm_features;

    /* in order to simplify APIC support, we leave this pointer to the
       user */
    struct DeviceState *apic_state;

    uint64 mcg_cap;
    uint64 mcg_status;
    uint64 mcg_ctl;
    uint64 mce_banks[MCE_BANKS_DEF*4];

    uint64_t tsc_aux;

    /* vmstate */
    uint16_t fpus_vmstate;
    uint16_t fptag_vmstate;
    uint16_t fpregs_format_vmstate;

    uint64_t xstate_bv;
    XMMReg ymmh_regs[CPU_NB_REGS];

    uint64_t xcr0;
} CPUX86State;

CPUX86State *cpu_x86_init(const char *cpu_model);
int cpu_x86_exec(CPUX86State *s);
void cpu_x86_close(CPUX86State *s);
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                   const char *optarg);
void x86_cpudef_setup(void);

int cpu_get_pic_interrupt(CPUX86State *s);
/* MSDOS compatibility mode FPU exception support */
void cpu_set_ferr(CPUX86State *s);

/* this function must always be used to load data in the segment
   cache: it synchronizes the hflags with the segment cache values */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
                                          int seg_reg, unsigned int selector,
                                          target_ulong base,
                                          unsigned int limit,
                                          unsigned int flags)
{
    SegmentCache *sc;
    unsigned int new_hflags;

    sc = &env->segs[seg_reg];
    sc->selector = selector;
    sc->base = base;
    sc->limit = limit;
    sc->flags = flags;

    /* update the hidden flags */
    {
        if (seg_reg == R_CS) {
#ifdef TARGET_X86_64
            if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
                /* long mode */
                env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
                env->hflags &= ~(HF_ADDSEG_MASK);
            } else
#endif
            {
                /* legacy / compatibility case */
                new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                    >> (DESC_B_SHIFT - HF_CS32_SHIFT);
                env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
                    new_hflags;
            }
        }
        new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
            >> (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (env->hflags & HF_CS64_MASK) {
            /* zero base assumed for DS, ES and SS in long mode */
        } else if (!(env->cr[0] & CR0_PE_MASK) ||
                   (env->eflags & VM_MASK) ||
                   !(env->hflags & HF_CS32_MASK)) {
            /* XXX: try to avoid this test. The problem comes from the
               fact that in real mode or vm86 mode we only modify the
               'base' and 'selector' fields of the segment cache to go
               faster. A solution may be to force addseg to one in
               translate-i386.c. */
            new_hflags |= HF_ADDSEG_MASK;
        } else {
            new_hflags |= ((env->segs[R_DS].base |
                            env->segs[R_ES].base |
                            env->segs[R_SS].base) != 0) <<
                HF_ADDSEG_SHIFT;
        }
        env->hflags = (env->hflags &
                       ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
    }
}

static inline void cpu_x86_load_seg_cache_sipi(CPUX86State *env,
                                               int sipi_vector)
{
    env->eip = 0;
    cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
                           sipi_vector << 12,
                           env->segs[R_CS].limit,
                           env->segs[R_CS].flags);
    env->halted = 0;
}

int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags);

/* wrapper, just in case memory mappings must be changed */
static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
{
#if HF_CPL_MASK == 3
    s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
#else
#error HF_CPL_MASK is hardcoded
#endif
}

/* op_helper.c */
/* used for debug or cpu save/restore */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f);
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper);

/* cpu-exec.c */
/* the following helpers are only usable in user mode simulation as
   they can trigger unexpected exceptions */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);

/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. non zero
   is returned if the signal was handled by the virtual CPU.  */
int cpu_x86_signal_handler(int host_signum, void *pinfo,
                           void *puc);

/* cpuid.c */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx);
int cpu_x86_register (CPUX86State *env, const char *cpu_model);
void cpu_clear_apic_feature(CPUX86State *env);

/* helper.c */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu);
#define cpu_handle_mmu_fault cpu_x86_handle_mmu_fault
void cpu_x86_set_a20(CPUX86State *env, int a20_state);

static inline int hw_breakpoint_enabled(unsigned long dr7, int index)
{
    return (dr7 >> (index * 2)) & 3;
}

static inline int hw_breakpoint_type(unsigned long dr7, int index)
{
    return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
}

static inline int hw_breakpoint_len(unsigned long dr7, int index)
{
    int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3);
    return (len == 2) ? 8 : len + 1;
}
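
/* Note: the 2-bit LEN field encodes 0 -> 1 byte, 1 -> 2 bytes and
   3 -> 4 bytes, while the value 2 selects an 8-byte breakpoint
   (long mode only), hence the special case above. */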

void hw_breakpoint_insert(CPUX86State *env, int index);
void hw_breakpoint_remove(CPUX86State *env, int index);
int check_hw_breakpoints(CPUX86State *env, int force_dr6_update);

/* will be suppressed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);

/* hw/pc.c */
void cpu_smm_update(CPUX86State *env);
uint64_t cpu_get_tsc(CPUX86State *env);

/* used to debug */
#define X86_DUMP_FPU  0x0001 /* dump FPU state too */
#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */

#define TARGET_PAGE_BITS 12

#ifdef TARGET_X86_64
#define TARGET_PHYS_ADDR_SPACE_BITS 52
/* ??? This is really 48 bits, sign-extended, but the only thing
   accessible to userland with bit 48 set is the VSYSCALL, and that
   is handled via other mechanisms.  */
#define TARGET_VIRT_ADDR_SPACE_BITS 47
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif

#define cpu_init cpu_x86_init
#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler
#define cpu_list_id x86_cpu_list
#define cpudef_setup x86_cpudef_setup

#define CPU_SAVE_VERSION 12

/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
static inline int cpu_mmu_index (CPUState *env)
{
    return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
}
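
/* Note: MMU mode 0 (_kernel) covers CPL 0-2 and mode 1 (_user) is used
   only at CPL 3, matching MMU_USER_IDX above. */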

/* translate.c */
void optimize_flags_init(void);

typedef struct CCTable {
    int (*compute_all)(void); /* return all the flags */
    int (*compute_c)(void);  /* return the C flag */
} CCTable;

#if defined(CONFIG_USER_ONLY)
static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
{
    if (newsp)
        env->regs[R_ESP] = newsp;
    env->regs[R_EAX] = 0;
}
#endif

#include "cpu-all.h"
#include "svm.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/apic.h"
#endif

static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
                                        target_ulong *cs_base, int *flags)
{
    *cs_base = env->segs[R_CS].base;
    *pc = *cs_base + env->eip;
    *flags = env->hflags |
        (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK));
}

void do_cpu_init(CPUState *env);
void do_cpu_sipi(CPUState *env);
#endif /* CPU_I386_H */