target-i386 / cpu.h @ 023fe10d

/*
 * i386 virtual CPU header
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifndef CPU_I386_H
#define CPU_I386_H

#define TARGET_LONG_BITS 32

/* target supports implicit self modifying code */
#define TARGET_HAS_SMC
/* support for self modifying code even if the modified instruction is
   close to the modifying instruction */
#define TARGET_HAS_PRECISE_SMC

#include "cpu-defs.h"

#if defined(__i386__) && !defined(CONFIG_SOFTMMU)
#define USE_CODE_COPY
#endif

#define R_EAX 0
#define R_ECX 1
#define R_EDX 2
#define R_EBX 3
#define R_ESP 4
#define R_EBP 5
#define R_ESI 6
#define R_EDI 7

#define R_AL 0
#define R_CL 1
#define R_DL 2
#define R_BL 3
#define R_AH 4
#define R_CH 5
#define R_DH 6
#define R_BH 7

#define R_ES 0
#define R_CS 1
#define R_SS 2
#define R_DS 3
#define R_FS 4
#define R_GS 5

/* segment descriptor fields */
#define DESC_G_MASK     (1 << 23)
#define DESC_B_SHIFT    22
#define DESC_B_MASK     (1 << DESC_B_SHIFT)
#define DESC_AVL_MASK   (1 << 20)
#define DESC_P_MASK     (1 << 15)
#define DESC_DPL_SHIFT  13
#define DESC_S_MASK     (1 << 12)
#define DESC_TYPE_SHIFT 8
#define DESC_A_MASK     (1 << 8)

#define DESC_CS_MASK    (1 << 11) /* 1=code segment 0=data segment */
#define DESC_C_MASK     (1 << 10) /* code: conforming */
#define DESC_R_MASK     (1 << 9)  /* code: readable */

#define DESC_E_MASK     (1 << 10) /* data: expansion direction */
#define DESC_W_MASK     (1 << 9)  /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9)
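
/* Illustrative sketch (added, not part of the original header): how the
   DESC_* bits of a cached segment 'flags' word can be tested.  The helper
   name is hypothetical. */
#if 0
static inline int desc_is_writable_data(uint32_t flags)
{
    /* non-system (S=1) data segment (code bit clear) with W=1 */
    return (flags & DESC_S_MASK) &&
           !(flags & DESC_CS_MASK) &&
           (flags & DESC_W_MASK);
}
#endif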

/* eflags masks */
#define CC_C    0x0001
#define CC_P    0x0004
#define CC_A    0x0010
#define CC_Z    0x0040
#define CC_S    0x0080
#define CC_O    0x0800

#define TF_SHIFT   8
#define IOPL_SHIFT 12
#define VM_SHIFT   17

#define TF_MASK                 0x00000100
#define IF_MASK                 0x00000200
#define DF_MASK                 0x00000400
#define IOPL_MASK               0x00003000
#define NT_MASK                 0x00004000
#define RF_MASK                 0x00010000
#define VM_MASK                 0x00020000
#define AC_MASK                 0x00040000
#define VIF_MASK                0x00080000
#define VIP_MASK                0x00100000
#define ID_MASK                 0x00200000
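
/* Illustrative sketch (added, not part of the original header): reading
   the IOPL field and the trap flag out of an eflags value with the masks
   above.  The helper names are hypothetical. */
#if 0
static inline int eflags_get_iopl(uint32_t eflags)
{
    return (eflags & IOPL_MASK) >> IOPL_SHIFT;
}

static inline int eflags_tf_set(uint32_t eflags)
{
    return (eflags & TF_MASK) != 0;
}
#endif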

/* hidden flags - used internally by qemu to represent additional cpu
   states. Only the CPL and INHIBIT_IRQ are not redundant. We avoid
   using the IOPL_MASK, TF_MASK and VM_MASK bit positions to ease ORing
   with eflags. */
/* current cpl */
#define HF_CPL_SHIFT         0
/* true if soft mmu is being used */
#define HF_SOFTMMU_SHIFT     2
/* true if hardware interrupts must be disabled for next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16 or 32 bit code/stack segments */
#define HF_CS32_SHIFT        4
#define HF_SS32_SHIFT        5
/* zero base for DS, ES and SS */
#define HF_ADDSEG_SHIFT      6
/* copy of CR0.PE (protected mode) */
#define HF_PE_SHIFT          7
#define HF_TF_SHIFT          8 /* must be same as eflags */
#define HF_MP_SHIFT          9 /* the order must be MP, EM, TS */
#define HF_EM_SHIFT         10
#define HF_TS_SHIFT         11
#define HF_IOPL_SHIFT       12 /* must be same as eflags */
#define HF_VM_SHIFT         17 /* must be same as eflags */

#define HF_CPL_MASK          (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK      (1 << HF_SOFTMMU_SHIFT)
#define HF_INHIBIT_IRQ_MASK  (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK         (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK         (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK       (1 << HF_ADDSEG_SHIFT)
#define HF_PE_MASK           (1 << HF_PE_SHIFT)
#define HF_TF_MASK           (1 << HF_TF_SHIFT)
#define HF_MP_MASK           (1 << HF_MP_SHIFT)
#define HF_EM_MASK           (1 << HF_EM_SHIFT)
#define HF_TS_MASK           (1 << HF_TS_SHIFT)
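
/* Illustrative sketch (added, not part of the original header): the
   current privilege level and the code segment size can be read back
   from the hidden flags word.  The helper names are hypothetical. */
#if 0
static inline int hflags_get_cpl(uint32_t hflags)
{
    return (hflags & HF_CPL_MASK) >> HF_CPL_SHIFT;
}

static inline int hflags_cs_is_32bit(uint32_t hflags)
{
    return (hflags & HF_CS32_MASK) != 0;
}
#endif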

#define CR0_PE_MASK  (1 << 0)
#define CR0_MP_MASK  (1 << 1)
#define CR0_EM_MASK  (1 << 2)
#define CR0_TS_MASK  (1 << 3)
#define CR0_ET_MASK  (1 << 4)
#define CR0_NE_MASK  (1 << 5)
#define CR0_WP_MASK  (1 << 16)
#define CR0_AM_MASK  (1 << 18)
#define CR0_PG_MASK  (1 << 31)

#define CR4_VME_MASK  (1 << 0)
#define CR4_PVI_MASK  (1 << 1)
#define CR4_TSD_MASK  (1 << 2)
#define CR4_DE_MASK   (1 << 3)
#define CR4_PSE_MASK  (1 << 4)
#define CR4_PAE_MASK  (1 << 5)
#define CR4_PGE_MASK  (1 << 7)

#define PG_PRESENT_BIT  0
#define PG_RW_BIT       1
#define PG_USER_BIT     2
#define PG_PWT_BIT      3
#define PG_PCD_BIT      4
#define PG_ACCESSED_BIT 5
#define PG_DIRTY_BIT    6
#define PG_PSE_BIT      7
#define PG_GLOBAL_BIT   8

#define PG_PRESENT_MASK  (1 << PG_PRESENT_BIT)
#define PG_RW_MASK       (1 << PG_RW_BIT)
#define PG_USER_MASK     (1 << PG_USER_BIT)
#define PG_PWT_MASK      (1 << PG_PWT_BIT)
#define PG_PCD_MASK      (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
#define PG_DIRTY_MASK    (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK      (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK   (1 << PG_GLOBAL_BIT)

#define PG_ERROR_W_BIT     1

#define PG_ERROR_P_MASK    0x01
#define PG_ERROR_W_MASK    (1 << PG_ERROR_W_BIT)
#define PG_ERROR_U_MASK    0x04
#define PG_ERROR_RSVD_MASK 0x08
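
/* Illustrative sketch (added, not part of the original header): a page
   fault error code is a combination of the PG_ERROR_* bits (P = page was
   present, W = access was a write, U = access came from user mode).  The
   helper name is hypothetical. */
#if 0
static inline int make_pf_error_code(int was_present, int is_write, int is_user)
{
    return (was_present ? PG_ERROR_P_MASK : 0) |
           (is_write ? PG_ERROR_W_MASK : 0) |
           (is_user ? PG_ERROR_U_MASK : 0);
}
#endif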

#define MSR_IA32_APICBASE               0x1b
#define MSR_IA32_APICBASE_BSP           (1<<8)
#define MSR_IA32_APICBASE_ENABLE        (1<<11)
#define MSR_IA32_APICBASE_BASE          (0xfffff<<12)
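
/* Illustrative sketch (added, not part of the original header): splitting
   an IA32_APICBASE MSR value into its base address and enable bit.  The
   helper names are hypothetical. */
#if 0
static inline uint32_t apicbase_get_base(uint32_t val)
{
    return val & MSR_IA32_APICBASE_BASE;
}

static inline int apicbase_is_enabled(uint32_t val)
{
    return (val & MSR_IA32_APICBASE_ENABLE) != 0;
}
#endif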

#define MSR_IA32_SYSENTER_CS            0x174
#define MSR_IA32_SYSENTER_ESP           0x175
#define MSR_IA32_SYSENTER_EIP           0x176

#define EXCP00_DIVZ     0
#define EXCP01_SSTP     1
#define EXCP02_NMI      2
#define EXCP03_INT3     3
#define EXCP04_INTO     4
#define EXCP05_BOUND    5
#define EXCP06_ILLOP    6
#define EXCP07_PREX     7
#define EXCP08_DBLE     8
#define EXCP09_XERR     9
#define EXCP0A_TSS      10
#define EXCP0B_NOSEG    11
#define EXCP0C_STACK    12
#define EXCP0D_GPF      13
#define EXCP0E_PAGE     14
#define EXCP10_COPR     16
#define EXCP11_ALGN     17
#define EXCP12_MCHK     18

enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */

    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
    CC_OP_MULW,
    CC_OP_MULL,

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,

    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,

    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,

    CC_OP_NB,
};
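
/* Illustrative sketch (added, not part of the original header): with the
   lazy flag scheme above, individual flags are recomputed on demand from
   cc_src/cc_dst according to cc_op.  For CC_OP_ADDL (CC_DST = res,
   CC_SRC = src1) the carry is set exactly when the unsigned result
   wrapped below the first operand.  The helper names are hypothetical. */
#if 0
static inline int cc_compute_c_addl(uint32_t cc_dst, uint32_t cc_src)
{
    return cc_dst < cc_src;   /* CF for a 32 bit add */
}

static inline int cc_compute_z(uint32_t cc_dst)
{
    return cc_dst == 0;       /* ZF: the result is simply tested for zero */
}
#endif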

#if (defined(__i386__) || defined(__x86_64__)) && !defined(_BSD)
#define USE_X86LDOUBLE
#endif

#ifdef USE_X86LDOUBLE
typedef long double CPU86_LDouble;
#else
typedef double CPU86_LDouble;
#endif

typedef struct SegmentCache {
    uint32_t selector;
    uint8_t *base;
    uint32_t limit;
    uint32_t flags;
} SegmentCache;

typedef struct CPUX86State {
    /* standard registers */
    uint32_t regs[8];
    uint32_t eip;
    uint32_t eflags; /* eflags register. During CPU emulation, CC
                        flags and DF are set to zero because they are
                        stored elsewhere */

    /* emulator internal eflags handling */
    uint32_t cc_src;
    uint32_t cc_dst;
    uint32_t cc_op;
    int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
    uint32_t hflags; /* hidden flags, see HF_xxx constants */

    /* FPU state */
    unsigned int fpstt; /* top of stack index */
    unsigned int fpus;
    unsigned int fpuc;
    uint8_t fptags[8];   /* 0 = valid, 1 = empty */
    CPU86_LDouble fpregs[8];

    /* emulator internal variables */
    CPU86_LDouble ft0;
    union {
        float f;
        double d;
        int i32;
        int64_t i64;
    } fp_convert;

    /* segments */
    SegmentCache segs[6]; /* selector values */
    SegmentCache ldt;
    SegmentCache tr;
    SegmentCache gdt; /* only base and limit are used */
    SegmentCache idt; /* only base and limit are used */

    /* sysenter registers */
    uint32_t sysenter_cs;
    uint32_t sysenter_esp;
    uint32_t sysenter_eip;

    /* temporary data for USE_CODE_COPY mode */
#ifdef USE_CODE_COPY
    uint32_t tmp0;
    uint32_t saved_esp;
    int native_fp_regs; /* if true, the FPU state is in the native CPU regs */
#endif

    /* exception/interrupt handling */
    jmp_buf jmp_env;
    int exception_index;
    int error_code;
    int exception_is_int;
    int exception_next_eip;
    struct TranslationBlock *current_tb; /* currently executing TB */
    uint32_t cr[5]; /* NOTE: cr1 is unused */
    uint32_t dr[8]; /* debug registers */
    int interrupt_request;
    int user_mode_only; /* user mode only simulation */

    uint32_t a20_mask;

    /* soft mmu support */
    /* in order to avoid passing too many arguments to the memory
       write helpers, we store some rarely used information in the CPU
       context */
    unsigned long mem_write_pc; /* host pc at which the memory was
                                   written */
    unsigned long mem_write_vaddr; /* target virtual addr at which the
                                      memory was written */
    /* 0 = kernel, 1 = user */
    CPUTLBEntry tlb_read[2][CPU_TLB_SIZE];
    CPUTLBEntry tlb_write[2][CPU_TLB_SIZE];

    /* ice debug support */
    uint32_t breakpoints[MAX_BREAKPOINTS];
    int nb_breakpoints;
    int singlestep_enabled;

    /* user data */
    void *opaque;
} CPUX86State;
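
/* Illustrative sketch (added, not part of the original header): the
   general purpose registers are indexed with the R_* constants defined
   above, e.g. regs[R_ESP] is the emulated stack pointer.  The helper name
   is hypothetical. */
#if 0
static inline uint32_t cpu_get_esp(CPUX86State *env)
{
    return env->regs[R_ESP];
}
#endif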

#ifndef IN_OP_I386
void cpu_x86_outb(CPUX86State *env, int addr, int val);
void cpu_x86_outw(CPUX86State *env, int addr, int val);
void cpu_x86_outl(CPUX86State *env, int addr, int val);
int cpu_x86_inb(CPUX86State *env, int addr);
int cpu_x86_inw(CPUX86State *env, int addr);
int cpu_x86_inl(CPUX86State *env, int addr);
#endif

CPUX86State *cpu_x86_init(void);
int cpu_x86_exec(CPUX86State *s);
void cpu_x86_close(CPUX86State *s);
int cpu_get_pic_interrupt(CPUX86State *s);
/* MSDOS compatibility mode FPU exception support */
void cpu_set_ferr(CPUX86State *s);
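
/* Illustrative sketch (added, not part of the original header): a minimal,
   hypothetical way a front end might drive the emulated CPU; guest setup,
   exception dispatch and error handling are omitted. */
#if 0
static void run_cpu(void)
{
    CPUX86State *env = cpu_x86_init();
    for (;;) {
        int trapnr = cpu_x86_exec(env);
        /* dispatch on trapnr / env->exception_index here */
        (void)trapnr;
    }
    /* not reached: cpu_x86_close(env); */
}
#endif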

/* this function must always be used to load data in the segment
   cache: it synchronizes the hflags with the segment cache values */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
                                          int seg_reg, unsigned int selector,
                                          uint8_t *base, unsigned int limit,
                                          unsigned int flags)
{
    SegmentCache *sc;
    unsigned int new_hflags;

    sc = &env->segs[seg_reg];
    sc->selector = selector;
    sc->base = base;
    sc->limit = limit;
    sc->flags = flags;

    /* update the hidden flags */
    new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
        >> (DESC_B_SHIFT - HF_CS32_SHIFT);
    new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
        >> (DESC_B_SHIFT - HF_SS32_SHIFT);
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        /* XXX: try to avoid this test. The problem comes from the
           fact that in real mode or vm86 mode we only modify the
           'base' and 'selector' fields of the segment cache to go
           faster. A solution may be to force addseg to one in
           translate-i386.c. */
        new_hflags |= HF_ADDSEG_MASK;
    } else {
        new_hflags |= (((unsigned long)env->segs[R_DS].base |
                        (unsigned long)env->segs[R_ES].base |
                        (unsigned long)env->segs[R_SS].base) != 0) <<
            HF_ADDSEG_SHIFT;
    }
    env->hflags = (env->hflags &
                   ~(HF_CS32_MASK | HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
}
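
/* Illustrative sketch (added, not part of the original header): in real
   mode a segment register load sets base = selector << 4 with a 64KB
   limit, and it must go through cpu_x86_load_seg_cache() so that hflags
   stay coherent.  The helper name is hypothetical. */
#if 0
static inline void load_realmode_seg(CPUX86State *env, int seg_reg,
                                     unsigned int selector)
{
    cpu_x86_load_seg_cache(env, seg_reg, selector,
                           (uint8_t *)(unsigned long)(selector << 4),
                           0xffff, 0);
}
#endif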

/* wrapper, just in case memory mappings must be changed */
static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
{
#if HF_CPL_MASK == 3
    s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
#else
#error HF_CPL_MASK is hardcoded
#endif
}

/* used for debug or cpu save/restore */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f);
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper);

/* the following helpers are only usable in user mode simulation as
   they can trigger unexpected exceptions */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32);
void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32);

/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. Non-zero
   is returned if the signal was handled by the virtual CPU. */
struct siginfo;
int cpu_x86_signal_handler(int host_signum, struct siginfo *info,
                           void *puc);
void cpu_x86_set_a20(CPUX86State *env, int a20_state);
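
/* Illustrative sketch (added, not part of the original header): a host
   SIGSEGV handler installed with sigaction(SA_SIGINFO) could simply
   forward the fault to cpu_x86_signal_handler() and abort if the virtual
   CPU did not claim it. */
#if 0
#include <signal.h>
#include <stdlib.h>

static void host_segv_handler(int host_signum, struct siginfo *info, void *puc)
{
    if (!cpu_x86_signal_handler(host_signum, info, puc))
        abort();
}
#endif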

uint64_t cpu_get_tsc(CPUX86State *env);

/* will be suppressed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);

/* used to debug */
#define X86_DUMP_FPU  0x0001 /* dump FPU state too */
#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */
void cpu_x86_dump_state(CPUX86State *env, FILE *f, int flags);
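
/* Illustrative sketch (added, not part of the original header): dumping
   the full state, including the FPU registers and the lazy flag cache,
   to stderr.  The helper name is hypothetical. */
#if 0
static void dump_everything(CPUX86State *env)
{
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
}
#endif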

#define TARGET_PAGE_BITS 12
#include "cpu-all.h"

#endif /* CPU_I386_H */