/*
 * i386 virtual CPU header
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#ifndef CPU_I386_H
#define CPU_I386_H

#include "config.h"

#ifdef TARGET_X86_64
#define TARGET_LONG_BITS 64
#else
#define TARGET_LONG_BITS 32
#endif

/* target supports implicit self modifying code */
#define TARGET_HAS_SMC
/* support for self modifying code even if the modified instruction is
   close to the modifying instruction */
#define TARGET_HAS_PRECISE_SMC

#define TARGET_HAS_ICE 1

#ifdef TARGET_X86_64
#define ELF_MACHINE        EM_X86_64
#else
#define ELF_MACHINE        EM_386
#endif

#include "cpu-defs.h"

#include "softfloat.h"

#define R_EAX 0
#define R_ECX 1
#define R_EDX 2
#define R_EBX 3
#define R_ESP 4
#define R_EBP 5
#define R_ESI 6
#define R_EDI 7

#define R_AL 0
#define R_CL 1
#define R_DL 2
#define R_BL 3
#define R_AH 4
#define R_CH 5
#define R_DH 6
#define R_BH 7

#define R_ES 0
#define R_CS 1
#define R_SS 2
#define R_DS 3
#define R_FS 4
#define R_GS 5

/* segment descriptor fields */
#define DESC_G_MASK     (1 << 23)
#define DESC_B_SHIFT    22
#define DESC_B_MASK     (1 << DESC_B_SHIFT)
#define DESC_L_SHIFT    21 /* x86_64 only : 64 bit code segment */
#define DESC_L_MASK     (1 << DESC_L_SHIFT)
#define DESC_AVL_MASK   (1 << 20)
#define DESC_P_MASK     (1 << 15)
#define DESC_DPL_SHIFT  13
#define DESC_DPL_MASK   (3 << DESC_DPL_SHIFT) /* DPL is a two-bit field */
#define DESC_S_MASK     (1 << 12)
#define DESC_TYPE_SHIFT 8
#define DESC_A_MASK     (1 << 8)

#define DESC_CS_MASK    (1 << 11) /* 1=code segment 0=data segment */
#define DESC_C_MASK     (1 << 10) /* code: conforming */
#define DESC_R_MASK     (1 << 9)  /* code: readable */

#define DESC_E_MASK     (1 << 10) /* data: expansion direction */
#define DESC_W_MASK     (1 << 9)  /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9)
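
/* Illustrative sketch, not part of the original header: the DESC_* values
   above are bit positions within the second descriptor word, as cached in
   SegmentCache.flags (defined below). For example, decoding the privilege
   level and segment kind:

       unsigned int dpl = (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT;
       int is_code = (sc->flags & DESC_S_MASK) && (sc->flags & DESC_CS_MASK);
*/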

/* eflags masks */
#define CC_C    0x0001
#define CC_P    0x0004
#define CC_A    0x0010
#define CC_Z    0x0040
#define CC_S    0x0080
#define CC_O    0x0800

#define TF_SHIFT   8
#define IOPL_SHIFT 12
#define VM_SHIFT   17

#define TF_MASK                 0x00000100
#define IF_MASK                 0x00000200
#define DF_MASK                 0x00000400
#define IOPL_MASK               0x00003000
#define NT_MASK                 0x00004000
#define RF_MASK                 0x00010000
#define VM_MASK                 0x00020000
#define AC_MASK                 0x00040000
#define VIF_MASK                0x00080000
#define VIP_MASK                0x00100000
#define ID_MASK                 0x00200000
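
/* Illustrative note, not in the original header: IOPL is a two-bit field,
   which is why IOPL_MASK covers bits 12-13. The guest's I/O privilege
   level can be extracted as:

       int iopl = (env->eflags & IOPL_MASK) >> IOPL_SHIFT;
*/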

/* hidden flags - used internally by qemu to represent additional cpu
   states. Only the CPL, INHIBIT_IRQ, SMM and SVMI are not
   redundant. We avoid using the IOPL_MASK, TF_MASK and VM_MASK bit
   positions to ease ORing with eflags. */
/* current cpl */
#define HF_CPL_SHIFT         0
/* true if soft mmu is being used */
#define HF_SOFTMMU_SHIFT     2
/* true if hardware interrupts must be disabled for next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16 or 32 bit segments */
#define HF_CS32_SHIFT        4
#define HF_SS32_SHIFT        5
/* zero base for DS, ES and SS: can be '0' only in a 32 bit CS segment */
#define HF_ADDSEG_SHIFT      6
/* copy of CR0.PE (protected mode) */
#define HF_PE_SHIFT          7
#define HF_TF_SHIFT          8 /* must be same as eflags */
#define HF_MP_SHIFT          9 /* the order must be MP, EM, TS */
#define HF_EM_SHIFT         10
#define HF_TS_SHIFT         11
#define HF_IOPL_SHIFT       12 /* must be same as eflags */
#define HF_LMA_SHIFT        14 /* only used on x86_64: long mode active */
#define HF_CS64_SHIFT       15 /* only used on x86_64: 64 bit code segment */
#define HF_OSFXSR_SHIFT     16 /* CR4.OSFXSR */
#define HF_VM_SHIFT         17 /* must be same as eflags */
#define HF_SMM_SHIFT        19 /* CPU in SMM mode */
#define HF_SVME_SHIFT       20 /* SVME enabled (copy of EFER.SVME) */
#define HF_SVMI_SHIFT       21 /* SVM intercepts are active */

#define HF_CPL_MASK          (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK      (1 << HF_SOFTMMU_SHIFT)
#define HF_INHIBIT_IRQ_MASK  (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK         (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK         (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK       (1 << HF_ADDSEG_SHIFT)
#define HF_PE_MASK           (1 << HF_PE_SHIFT)
#define HF_TF_MASK           (1 << HF_TF_SHIFT)
#define HF_MP_MASK           (1 << HF_MP_SHIFT)
#define HF_EM_MASK           (1 << HF_EM_SHIFT)
#define HF_TS_MASK           (1 << HF_TS_SHIFT)
#define HF_IOPL_MASK         (3 << HF_IOPL_SHIFT)
#define HF_LMA_MASK          (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK         (1 << HF_CS64_SHIFT)
#define HF_OSFXSR_MASK       (1 << HF_OSFXSR_SHIFT)
#define HF_VM_MASK           (1 << HF_VM_SHIFT)
#define HF_SMM_MASK          (1 << HF_SMM_SHIFT)
#define HF_SVME_MASK         (1 << HF_SVME_SHIFT)
#define HF_SVMI_MASK         (1 << HF_SVMI_SHIFT)
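
/* Illustrative sketch, not part of the original header: because HF_TF,
   HF_IOPL and HF_VM sit at the same bit positions as their eflags
   counterparts, translation-block flags can be composed with a plain OR,
   along the lines of:

       uint32_t tb_flags = env->hflags |
           (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
*/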

/* hflags2 */

#define HF2_GIF_SHIFT        0 /* if set CPU takes interrupts */
#define HF2_HIF_SHIFT        1 /* value of IF_MASK when entering SVM */
#define HF2_NMI_SHIFT        2 /* CPU serving NMI */
#define HF2_VINTR_SHIFT      3 /* value of V_INTR_MASKING bit */

#define HF2_GIF_MASK          (1 << HF2_GIF_SHIFT)
#define HF2_HIF_MASK          (1 << HF2_HIF_SHIFT)
#define HF2_NMI_MASK          (1 << HF2_NMI_SHIFT)
#define HF2_VINTR_MASK        (1 << HF2_VINTR_SHIFT)

#define CR0_PE_SHIFT 0
#define CR0_MP_SHIFT 1

#define CR0_PE_MASK  (1 << 0)
#define CR0_MP_MASK  (1 << 1)
#define CR0_EM_MASK  (1 << 2)
#define CR0_TS_MASK  (1 << 3)
#define CR0_ET_MASK  (1 << 4)
#define CR0_NE_MASK  (1 << 5)
#define CR0_WP_MASK  (1 << 16)
#define CR0_AM_MASK  (1 << 18)
#define CR0_PG_MASK  (1 << 31)

#define CR4_VME_MASK  (1 << 0)
#define CR4_PVI_MASK  (1 << 1)
#define CR4_TSD_MASK  (1 << 2)
#define CR4_DE_MASK   (1 << 3)
#define CR4_PSE_MASK  (1 << 4)
#define CR4_PAE_MASK  (1 << 5)
#define CR4_PGE_MASK  (1 << 7)
#define CR4_PCE_MASK  (1 << 8)
#define CR4_OSFXSR_SHIFT 9
#define CR4_OSFXSR_MASK (1 << CR4_OSFXSR_SHIFT)
#define CR4_OSXMMEXCPT_MASK  (1 << 10)
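
/* Illustrative sketch, not part of the original header: the MMU code picks
   the paging mode from these control bits, roughly:

       if (!(env->cr[0] & CR0_PG_MASK)) {
           ... paging disabled, linear address == physical address ...
       } else if (env->cr[4] & CR4_PAE_MASK) {
           ... PAE page tables (4-level once EFER.LMA is set) ...
       } else {
           ... legacy two-level 32-bit page tables ...
       }
*/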

#define PG_PRESENT_BIT  0
#define PG_RW_BIT       1
#define PG_USER_BIT     2
#define PG_PWT_BIT      3
#define PG_PCD_BIT      4
#define PG_ACCESSED_BIT 5
#define PG_DIRTY_BIT    6
#define PG_PSE_BIT      7
#define PG_GLOBAL_BIT   8
#define PG_NX_BIT       63

#define PG_PRESENT_MASK  (1 << PG_PRESENT_BIT)
#define PG_RW_MASK       (1 << PG_RW_BIT)
#define PG_USER_MASK     (1 << PG_USER_BIT)
#define PG_PWT_MASK      (1 << PG_PWT_BIT)
#define PG_PCD_MASK      (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
#define PG_DIRTY_MASK    (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK      (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK   (1 << PG_GLOBAL_BIT)
#define PG_NX_MASK       (1LL << PG_NX_BIT)

#define PG_ERROR_W_BIT     1

#define PG_ERROR_P_MASK    0x01
#define PG_ERROR_W_MASK    (1 << PG_ERROR_W_BIT)
#define PG_ERROR_U_MASK    0x04
#define PG_ERROR_RSVD_MASK 0x08
#define PG_ERROR_I_D_MASK  0x10
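
/* Illustrative sketch, not part of the original header: the error code
   pushed for a page fault (exception 14) is built from the PG_ERROR_*
   masks, e.g. for a user-mode write to a present page:

       uint32_t error_code = PG_ERROR_P_MASK | PG_ERROR_W_MASK |
                             PG_ERROR_U_MASK;
*/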

#define MSR_IA32_TSC                    0x10
#define MSR_IA32_APICBASE               0x1b
#define MSR_IA32_APICBASE_BSP           (1 << 8)
#define MSR_IA32_APICBASE_ENABLE        (1 << 11)
#define MSR_IA32_APICBASE_BASE          (0xfffff << 12)

#define MSR_IA32_SYSENTER_CS            0x174
#define MSR_IA32_SYSENTER_ESP           0x175
#define MSR_IA32_SYSENTER_EIP           0x176

#define MSR_MCG_CAP                     0x179
#define MSR_MCG_STATUS                  0x17a
#define MSR_MCG_CTL                     0x17b

#define MSR_IA32_PERF_STATUS            0x198

#define MSR_PAT                         0x277

#define MSR_EFER                        0xc0000080

#define MSR_EFER_SCE   (1 << 0)
#define MSR_EFER_LME   (1 << 8)
#define MSR_EFER_LMA   (1 << 10)
#define MSR_EFER_NXE   (1 << 11)
#define MSR_EFER_SVME  (1 << 12)
#define MSR_EFER_FFXSR (1 << 14)
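
/* Illustrative note, not in the original header: long mode becomes active
   (EFER.LMA, mirrored in HF_LMA_MASK) once the guest sets EFER.LME and
   then enables paging:

       int lma = (env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK);
*/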

#define MSR_STAR                        0xc0000081
#define MSR_LSTAR                       0xc0000082
#define MSR_CSTAR                       0xc0000083
#define MSR_FMASK                       0xc0000084
#define MSR_FSBASE                      0xc0000100
#define MSR_GSBASE                      0xc0000101
#define MSR_KERNELGSBASE                0xc0000102

#define MSR_VM_HSAVE_PA                 0xc0010117

/* cpuid_features bits */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
#define CPUID_PAT  (1 << 16)
#define CPUID_PSE36   (1 << 17)
#define CPUID_PN   (1 << 18)
#define CPUID_CLFLUSH (1 << 19)
#define CPUID_DTS  (1 << 21)
#define CPUID_ACPI (1 << 22)
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)
#define CPUID_SS   (1 << 27)
#define CPUID_HT   (1 << 28)
#define CPUID_TM   (1 << 29)
#define CPUID_IA64 (1 << 30)
#define CPUID_PBE  (1 << 31)

#define CPUID_EXT_SSE3     (1 << 0)
#define CPUID_EXT_DTES64   (1 << 2)
#define CPUID_EXT_MONITOR  (1 << 3)
#define CPUID_EXT_DSCPL    (1 << 4)
#define CPUID_EXT_VMX      (1 << 5)
#define CPUID_EXT_SMX      (1 << 6)
#define CPUID_EXT_EST      (1 << 7)
#define CPUID_EXT_TM2      (1 << 8)
#define CPUID_EXT_SSSE3    (1 << 9)
#define CPUID_EXT_CID      (1 << 10)
#define CPUID_EXT_CX16     (1 << 13)
#define CPUID_EXT_XTPR     (1 << 14)
#define CPUID_EXT_PDCM     (1 << 15)
#define CPUID_EXT_DCA      (1 << 18)
#define CPUID_EXT_SSE41    (1 << 19)
#define CPUID_EXT_SSE42    (1 << 20)
#define CPUID_EXT_X2APIC   (1 << 21)
#define CPUID_EXT_MOVBE    (1 << 22)
#define CPUID_EXT_POPCNT   (1 << 23)
#define CPUID_EXT_XSAVE    (1 << 26)
#define CPUID_EXT_OSXSAVE  (1 << 27)

#define CPUID_EXT2_SYSCALL (1 << 11)
#define CPUID_EXT2_MP      (1 << 19)
#define CPUID_EXT2_NX      (1 << 20)
#define CPUID_EXT2_MMXEXT  (1 << 22)
#define CPUID_EXT2_FFXSR   (1 << 25)
#define CPUID_EXT2_PDPE1GB (1 << 26)
#define CPUID_EXT2_RDTSCP  (1 << 27)
#define CPUID_EXT2_LM      (1 << 29)
#define CPUID_EXT2_3DNOWEXT (1 << 30)
#define CPUID_EXT2_3DNOW   (1 << 31)

#define CPUID_EXT3_LAHF_LM (1 << 0)
#define CPUID_EXT3_CMP_LEG (1 << 1)
#define CPUID_EXT3_SVM     (1 << 2)
#define CPUID_EXT3_EXTAPIC (1 << 3)
#define CPUID_EXT3_CR8LEG  (1 << 4)
#define CPUID_EXT3_ABM     (1 << 5)
#define CPUID_EXT3_SSE4A   (1 << 6)
#define CPUID_EXT3_MISALIGNSSE (1 << 7)
#define CPUID_EXT3_3DNOWPREFETCH (1 << 8)
#define CPUID_EXT3_OSVW    (1 << 9)
#define CPUID_EXT3_IBS     (1 << 10)
#define CPUID_EXT3_SKINIT  (1 << 12)

#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */

#define CPUID_VENDOR_AMD_1   0x68747541 /* "Auth" */
#define CPUID_VENDOR_AMD_2   0x69746e65 /* "enti" */
#define CPUID_VENDOR_AMD_3   0x444d4163 /* "cAMD" */
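
/* Illustrative note, not in the original header: these words are the
   little-endian chunks of the vendor string as CPUID leaf 0 returns it,
   in EBX, EDX, ECX order, so "GenuineIntel" comes back as:

       EBX = CPUID_VENDOR_INTEL_1;   ("Genu")
       EDX = CPUID_VENDOR_INTEL_2;   ("ineI")
       ECX = CPUID_VENDOR_INTEL_3;   ("ntel")
*/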

#define CPUID_MWAIT_IBE     (1 << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX     (1 << 0) /* enumeration supported */

#define EXCP00_DIVZ     0
#define EXCP01_SSTP     1
#define EXCP02_NMI      2
#define EXCP03_INT3     3
#define EXCP04_INTO     4
#define EXCP05_BOUND    5
#define EXCP06_ILLOP    6
#define EXCP07_PREX     7
#define EXCP08_DBLE     8
#define EXCP09_XERR     9
#define EXCP0A_TSS      10
#define EXCP0B_NOSEG    11
#define EXCP0C_STACK    12
#define EXCP0D_GPF      13
#define EXCP0E_PAGE     14
#define EXCP10_COPR     16
#define EXCP11_ALGN     17
#define EXCP12_MCHK     18

#define EXCP_SYSCALL    0x100 /* only happens in user-only emulation,
                                 for the syscall instruction */

enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */

    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
    CC_OP_MULW,
    CC_OP_MULL,
    CC_OP_MULQ,

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,
    CC_OP_ADDQ,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,
    CC_OP_ADCQ,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,
    CC_OP_SUBQ,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,
    CC_OP_SBBQ,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,
    CC_OP_LOGICQ,

    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,
    CC_OP_INCQ,

    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,
    CC_OP_DECQ,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,
    CC_OP_SHLQ,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,
    CC_OP_SARQ,

    CC_OP_NB,
};
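
/* Illustrative sketch, not part of the original header: with this lazy
   scheme, the arithmetic flags are reconstructed from cc_op/cc_src/cc_dst
   only when needed. For CC_OP_ADDL, for instance, the carry flag falls
   out of an unsigned comparison of result and first operand:

       uint32_t res  = (uint32_t)env->cc_dst;
       uint32_t src1 = (uint32_t)env->cc_src;
       int cf = res < src1;   (unsigned overflow of the addition)
*/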

#ifdef FLOATX80
#define USE_X86LDOUBLE
#endif

#ifdef USE_X86LDOUBLE
typedef floatx80 CPU86_LDouble;
#else
typedef float64 CPU86_LDouble;
#endif

typedef struct SegmentCache {
    uint32_t selector;
    target_ulong base;
    uint32_t limit;
    uint32_t flags;
} SegmentCache;

typedef union {
    uint8_t _b[16];
    uint16_t _w[8];
    uint32_t _l[4];
    uint64_t _q[2];
    float32 _s[4];
    float64 _d[2];
} XMMReg;

typedef union {
    uint8_t _b[8];
    uint16_t _w[4];
    uint32_t _l[2];
    float32 _s[2];
    uint64_t q;
} MMXReg;

#ifdef WORDS_BIGENDIAN
#define XMM_B(n) _b[15 - (n)]
#define XMM_W(n) _w[7 - (n)]
#define XMM_L(n) _l[3 - (n)]
#define XMM_S(n) _s[3 - (n)]
#define XMM_Q(n) _q[1 - (n)]
#define XMM_D(n) _d[1 - (n)]

#define MMX_B(n) _b[7 - (n)]
#define MMX_W(n) _w[3 - (n)]
#define MMX_L(n) _l[1 - (n)]
#define MMX_S(n) _s[1 - (n)]
#else
#define XMM_B(n) _b[n]
#define XMM_W(n) _w[n]
#define XMM_L(n) _l[n]
#define XMM_S(n) _s[n]
#define XMM_Q(n) _q[n]
#define XMM_D(n) _d[n]

#define MMX_B(n) _b[n]
#define MMX_W(n) _w[n]
#define MMX_L(n) _l[n]
#define MMX_S(n) _s[n]
#endif
#define MMX_Q(n) q
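
/* Illustrative sketch, not part of the original header: the accessor
   macros hide host byte order, so element 0 is always the architecturally
   lowest element. For example, writing the low 32 bits of XMM register 0:

       env->xmm_regs[0].XMM_L(0) = 0x12345678;
*/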

#ifdef TARGET_X86_64
#define CPU_NB_REGS 16
#else
#define CPU_NB_REGS 8
#endif

#define NB_MMU_MODES 2

typedef struct CPUX86State {
    /* standard registers */
    target_ulong regs[CPU_NB_REGS];
    target_ulong eip;
    target_ulong eflags; /* eflags register. During CPU emulation, CC
                        flags and DF are set to zero because they are
                        stored elsewhere */

    /* emulator internal eflags handling */
    target_ulong cc_src;
    target_ulong cc_dst;
    uint32_t cc_op;
    int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
    uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
                        are known at translation time. */
    uint32_t hflags2; /* various other flags, see HF2_xxx constants. */

    /* segments */
    SegmentCache segs[6]; /* selector values */
    SegmentCache ldt;
    SegmentCache tr;
    SegmentCache gdt; /* only base and limit are used */
    SegmentCache idt; /* only base and limit are used */

    target_ulong cr[5]; /* NOTE: cr1 is unused */
    uint64_t a20_mask;

    /* FPU state */
    unsigned int fpstt; /* top of stack index */
    unsigned int fpus;
    unsigned int fpuc;
    uint8_t fptags[8];   /* 0 = valid, 1 = empty */
    union {
#ifdef USE_X86LDOUBLE
        CPU86_LDouble d __attribute__((aligned(16)));
#else
        CPU86_LDouble d;
#endif
        MMXReg mmx;
    } fpregs[8];

    /* emulator internal variables */
    float_status fp_status;
    CPU86_LDouble ft0;

    float_status mmx_status; /* for 3DNow! float ops */
    float_status sse_status;
    uint32_t mxcsr;
    XMMReg xmm_regs[CPU_NB_REGS];
    XMMReg xmm_t0;
    MMXReg mmx_t0;
    target_ulong cc_tmp; /* temporary for rcr/rcl */

    /* sysenter registers */
    uint32_t sysenter_cs;
    target_ulong sysenter_esp;
    target_ulong sysenter_eip;
    uint64_t efer;
    uint64_t star;

    uint64_t vm_hsave;
    uint64_t vm_vmcb;
    uint64_t tsc_offset;
    uint64_t intercept;
    uint16_t intercept_cr_read;
    uint16_t intercept_cr_write;
    uint16_t intercept_dr_read;
    uint16_t intercept_dr_write;
    uint32_t intercept_exceptions;
    uint8_t v_tpr;

#ifdef TARGET_X86_64
    target_ulong lstar;
    target_ulong cstar;
    target_ulong fmask;
    target_ulong kernelgsbase;
#endif

    uint64_t pat;

    /* exception/interrupt handling */
    int error_code;
    int exception_is_int;
    target_ulong exception_next_eip;
    target_ulong dr[8]; /* debug registers */
    uint32_t smbase;
    int old_exception;  /* exception in flight */

    CPU_COMMON

    /* processor features (e.g. for CPUID insn) */
    uint32_t cpuid_level;
    uint32_t cpuid_vendor1;
    uint32_t cpuid_vendor2;
    uint32_t cpuid_vendor3;
    uint32_t cpuid_version;
    uint32_t cpuid_features;
    uint32_t cpuid_ext_features;
    uint32_t cpuid_xlevel;
    uint32_t cpuid_model[12];
    uint32_t cpuid_ext2_features;
    uint32_t cpuid_ext3_features;
    uint32_t cpuid_apic_id;

#ifdef USE_KQEMU
    int kqemu_enabled;
    int last_io_time;
#endif
    /* in order to simplify APIC support, we leave this pointer to the
       user */
    struct APICState *apic_state;
} CPUX86State;

CPUX86State *cpu_x86_init(const char *cpu_model);
int cpu_x86_exec(CPUX86State *s);
void cpu_x86_close(CPUX86State *s);
void x86_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...));
int cpu_get_pic_interrupt(CPUX86State *s);
/* MSDOS compatibility mode FPU exception support */
void cpu_set_ferr(CPUX86State *s);

/* this function must always be used to load data in the segment
   cache: it synchronizes the hflags with the segment cache values */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
                                          int seg_reg, unsigned int selector,
                                          target_ulong base,
                                          unsigned int limit,
                                          unsigned int flags)
{
    SegmentCache *sc;
    unsigned int new_hflags;

    sc = &env->segs[seg_reg];
    sc->selector = selector;
    sc->base = base;
    sc->limit = limit;
    sc->flags = flags;

    /* update the hidden flags */
    {
        if (seg_reg == R_CS) {
#ifdef TARGET_X86_64
            if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
                /* long mode */
                env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
                env->hflags &= ~(HF_ADDSEG_MASK);
            } else
#endif
            {
                /* legacy / compatibility case */
                new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                    >> (DESC_B_SHIFT - HF_CS32_SHIFT);
                env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
                    new_hflags;
            }
        }
        new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
            >> (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (env->hflags & HF_CS64_MASK) {
            /* zero base assumed for DS, ES and SS in long mode */
        } else if (!(env->cr[0] & CR0_PE_MASK) ||
                   (env->eflags & VM_MASK) ||
                   !(env->hflags & HF_CS32_MASK)) {
            /* XXX: try to avoid this test. The problem comes from the
               fact that in real mode or vm86 mode we only modify the
               'base' and 'selector' fields of the segment cache to go
               faster. A solution may be to force addseg to one in
               translate-i386.c. */
            new_hflags |= HF_ADDSEG_MASK;
        } else {
            new_hflags |= ((env->segs[R_DS].base |
                            env->segs[R_ES].base |
                            env->segs[R_SS].base) != 0) <<
                HF_ADDSEG_SHIFT;
        }
        env->hflags = (env->hflags &
                       ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
    }
}
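
/* Illustrative sketch, not part of the original header: a caller setting
   up a flat 32-bit code segment might load the cache along these lines:

       cpu_x86_load_seg_cache(env, R_CS, 0x08, 0, 0xffffffff,
                              DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                              DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK |
                              DESC_A_MASK);
*/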

/* wrapper, just in case memory mappings must be changed */
static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
{
#if HF_CPL_MASK == 3
    s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
#else
#error HF_CPL_MASK is hardcoded
#endif
}

/* used for debug or cpu save/restore */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f);
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper);

/* the following helpers are only usable in user mode simulation as
   they can trigger unexpected exceptions */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);

/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. Non-zero
   is returned if the signal was handled by the virtual CPU. */
int cpu_x86_signal_handler(int host_signum, void *pinfo,
                           void *puc);
void cpu_x86_set_a20(CPUX86State *env, int a20_state);

uint64_t cpu_get_tsc(CPUX86State *env);

void cpu_set_apic_base(CPUX86State *env, uint64_t val);
uint64_t cpu_get_apic_base(CPUX86State *env);
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val);
#ifndef NO_CPU_IO_DEFS
uint8_t cpu_get_apic_tpr(CPUX86State *env);
#endif
void cpu_smm_update(CPUX86State *env);

/* will be removed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);

/* used to debug */
#define X86_DUMP_FPU  0x0001 /* dump FPU state too */
#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */

#ifdef USE_KQEMU
static inline int cpu_get_time_fast(void)
{
    int low, high;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    return low;
}
#endif

#define TARGET_PAGE_BITS 12

#define CPUState CPUX86State
#define cpu_init cpu_x86_init
#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler
#define cpu_list x86_cpu_list

#define CPU_SAVE_VERSION 7

/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
static inline int cpu_mmu_index(CPUState *env)
{
    return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
}

void optimize_flags_init(void);

typedef struct CCTable {
    int (*compute_all)(void); /* return all the flags */
    int (*compute_c)(void);   /* return the C flag */
} CCTable;

extern CCTable cc_table[];
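
/* Illustrative sketch, not part of the original header: cc_table is
   indexed by the current CC_OP_xxx value, so the emulator can materialize
   the arithmetic flags lazily, roughly:

       int cc_flags = cc_table[env->cc_op].compute_all();
       uint32_t eflags = env->eflags | cc_flags;
       (DF is tracked separately in env->df)
*/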

#if defined(CONFIG_USER_ONLY)
static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
{
    if (newsp)
        env->regs[R_ESP] = newsp;
    env->regs[R_EAX] = 0;
}
#endif

#define CPU_PC_FROM_TB(env, tb) env->eip = tb->pc - tb->cs_base

#include "cpu-all.h"

#include "svm.h"

#endif /* CPU_I386_H */