root / cpu-i386.h @ c50c0c3f
History | View | Annotate | Download (9.7 kB)
1 |
/*
|
---|---|
2 |
* i386 virtual CPU header
|
3 |
*
|
4 |
* Copyright (c) 2003 Fabrice Bellard
|
5 |
*
|
6 |
* This library is free software; you can redistribute it and/or
|
7 |
* modify it under the terms of the GNU Lesser General Public
|
8 |
* License as published by the Free Software Foundation; either
|
9 |
* version 2 of the License, or (at your option) any later version.
|
10 |
*
|
11 |
* This library is distributed in the hope that it will be useful,
|
12 |
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
* Lesser General Public License for more details.
|
15 |
*
|
16 |
* You should have received a copy of the GNU Lesser General Public
|
17 |
* License along with this library; if not, write to the Free Software
|
18 |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
19 |
*/
|
20 |
#ifndef CPU_I386_H
|
21 |
#define CPU_I386_H
|
22 |
|
23 |
#include "config.h"

#include <setjmp.h>
#include <string.h> /* memcpy, used by the float/double access helpers */
25 |
|
26 |
/* general purpose register indices; the order matches the i386
   "reg" field instruction encoding */
#define R_EAX 0
#define R_ECX 1
#define R_EDX 2
#define R_EBX 3
#define R_ESP 4
#define R_EBP 5
#define R_ESI 6
#define R_EDI 7

/* 8 bit register indices; 4-7 select the high byte (AH..BH) of the
   first four registers */
#define R_AL 0
#define R_CL 1
#define R_DL 2
#define R_BL 3
#define R_AH 4
#define R_CH 5
#define R_DH 6
#define R_BH 7

/* segment register indices, in instruction encoding order */
#define R_ES 0
#define R_CS 1
#define R_SS 2
#define R_DS 3
#define R_FS 4
#define R_GS 5

/* eflags masks (arithmetic condition codes) */
#define CC_C 0x0001 /* carry */
#define CC_P 0x0004 /* parity */
#define CC_A 0x0010 /* auxiliary carry */
#define CC_Z 0x0040 /* zero */
#define CC_S 0x0080 /* sign */
#define CC_O 0x0800 /* overflow */

/* other eflags bits */
#define TF_MASK 0x00000100 /* trap flag (single step) */
#define IF_MASK 0x00000200 /* interrupt enable */
#define DF_MASK 0x00000400 /* direction */
#define IOPL_MASK 0x00003000 /* I/O privilege level */
#define NT_MASK 0x00004000 /* nested task */
#define RF_MASK 0x00010000 /* resume */
#define VM_MASK 0x00020000 /* virtual 8086 mode */
#define AC_MASK 0x00040000 /* alignment check */
#define VIF_MASK 0x00080000 /* virtual interrupt flag */
#define VIP_MASK 0x00100000 /* virtual interrupt pending */
#define ID_MASK 0x00200000 /* CPUID detection flag */

/* architecturally defined i386 exception vector numbers */
#define EXCP00_DIVZ 0
#define EXCP01_SSTP 1
#define EXCP02_NMI 2
#define EXCP03_INT3 3
#define EXCP04_INTO 4
#define EXCP05_BOUND 5
#define EXCP06_ILLOP 6
#define EXCP07_PREX 7
#define EXCP08_DBLE 8
#define EXCP09_XERR 9
#define EXCP0A_TSS 10
#define EXCP0B_NOSEG 11
#define EXCP0C_STACK 12
#define EXCP0D_GPF 13
#define EXCP0E_PAGE 14
#define EXCP10_COPR 16
#define EXCP11_ALGN 17
#define EXCP12_MCHK 18

/* pseudo exception used by the emulator itself */
#define EXCP_INTERRUPT 256 /* async interruption */
91 |
|
92 |
/* Tags for the last operation that set the condition codes.  The
   flags are evaluated lazily: generated code records the operation
   kind here (cpu state field cc_op) and its operands/result in
   CC_SRC/CC_DST, and the actual eflags bits are only computed when
   needed.  The B/W/L suffix gives the operand size (byte/word/long). */
enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */
    CC_OP_MUL, /* modify all flags, C, O = (CC_SRC != 0) */

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,

    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,

    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,

    CC_OP_NB, /* number of entries, not a real operation */
};
135 |
|
136 |
/* Type used to emulate the x87 FPU registers.  On an x86 host the
   native long double (x87 extended precision) is used so the guest
   gets the full precision; elsewhere a plain double is used. */
#ifdef __i386__
#define USE_X86LDOUBLE
#endif

#ifdef USE_X86LDOUBLE
typedef long double CPU86_LDouble;
#else
typedef double CPU86_LDouble;
#endif
|
145 |
|
146 |
/* Cached per-segment descriptor information (filled from the
   LDT/GDT, see seg_cache[] in CPUX86State). */
typedef struct SegmentCache {
    uint8_t *base;       /* host pointer for the segment base */
    unsigned long limit; /* segment limit */
    uint8_t seg_32bit;   /* non zero for a 32 bit segment */
} SegmentCache;
151 |
|
152 |
/* Location of a descriptor table (GDT/LDT/IDT). */
typedef struct SegmentDescriptorTable {
    uint8_t *base;       /* host pointer to the table */
    unsigned long limit;
    /* this is the returned base when reading the register, just to
       avoid that the emulated program modifies it */
    unsigned long emu_base;
} SegmentDescriptorTable;
159 |
|
160 |
/* Complete emulated i386 CPU state. */
typedef struct CPUX86State {
    /* standard registers */
    uint32_t regs[8]; /* indexed by R_EAX..R_EDI */
    uint32_t eip;
    uint32_t eflags; /* eflags register. During CPU emulation, CC
                        flags and DF are set to zero because they are
                        stored elsewhere */

    /* emulator internal eflags handling (lazy condition codes,
       see the CC_OP_* enum) */
    uint32_t cc_src;
    uint32_t cc_dst;
    uint32_t cc_op;
    int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */

    /* FPU state */
    unsigned int fpstt; /* top of stack index */
    unsigned int fpus;  /* status word */
    unsigned int fpuc;  /* control word */
    uint8_t fptags[8]; /* 0 = valid, 1 = empty */
    CPU86_LDouble fpregs[8];

    /* emulator internal variables */
    CPU86_LDouble ft0; /* float temporary */
    union {
        float f;
        double d;
        int i32;
        int64_t i64;
    } fp_convert; /* scratch area for FP format conversions */

    /* segments */
    uint32_t segs[6]; /* selector values */
    SegmentCache seg_cache[6]; /* info taken from LDT/GDT */
    SegmentDescriptorTable gdt;
    SegmentDescriptorTable ldt;
    SegmentDescriptorTable idt;

    /* exception/interrupt handling */
    jmp_buf jmp_env; /* presumably the longjmp target used to leave
                        execution on an exception — confirm in cpu_x86_exec */
    int exception_index;
    int error_code;
    uint32_t cr2; /* CR2: faulting linear address for page faults */
    int interrupt_request;

    /* user data */
    void *opaque;
} CPUX86State;
207 |
|
208 |
/* all CPU memory access use these macros */
|
209 |
/* Load an unsigned byte from a host pointer. */
static inline int ldub(void *ptr)
{
    const uint8_t *p = ptr;
    return *p;
}
213 |
|
214 |
/* Load a signed byte (sign-extended to int) from a host pointer. */
static inline int ldsb(void *ptr)
{
    const int8_t *p = ptr;
    return *p;
}
218 |
|
219 |
/* Store the low byte of v at a host pointer. */
static inline void stb(void *ptr, int v)
{
    uint8_t *p = ptr;
    *p = (uint8_t)v;
}
223 |
|
224 |
#ifdef WORDS_BIGENDIAN
|
225 |
|
226 |
/* conservative code for little endian unaligned accesses */
|
227 |
/* Load an unsigned little-endian 16 bit value from a possibly
   unaligned host address (big-endian host variant). */
static inline int lduw(void *ptr)
{
#ifdef __powerpc__
    /* lhbrx = load halfword byte-reversed: one-instruction endian swap */
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    /* portable byte assembly, also safe for unaligned pointers */
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}
238 |
|
239 |
/* Load a signed little-endian 16 bit value (sign-extended to int)
   from a possibly unaligned host address (big-endian host variant). */
static inline int ldsw(void *ptr)
{
#ifdef __powerpc__
    /* lhbrx loads the halfword byte-reversed; the int16_t cast then
       sign-extends the result */
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}
250 |
|
251 |
/* Load a little-endian 32 bit value from a possibly unaligned host
   address (big-endian host variant). */
static inline int ldl(void *ptr)
{
#ifdef __powerpc__
    /* lwbrx = load word byte-reversed */
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}
262 |
|
263 |
/* Load a little-endian 64 bit value as two 32 bit halves
   (big-endian host variant). */
static inline uint64_t ldq(void *ptr)
{
    uint8_t *base = ptr;
    uint32_t low = ldl(base);
    uint32_t high = ldl(base + 4);
    return ((uint64_t)high << 32) | low;
}
271 |
|
272 |
/* Store the low 16 bits of v little-endian at a possibly unaligned
   host address (big-endian host variant). */
static inline void stw(void *ptr, int v)
{
#ifdef __powerpc__
    /* sthbrx = store halfword byte-reversed */
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}
282 |
|
283 |
/* Store the low 32 bits of v little-endian at a possibly unaligned
   host address (big-endian host variant). */
static inline void stl(void *ptr, int v)
{
#ifdef __powerpc__
    /* stwbrx = store word byte-reversed */
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}
295 |
|
296 |
/* Store a 64 bit value little-endian as two 32 bit halves
   (big-endian host variant). */
static inline void stq(void *ptr, uint64_t v)
{
    uint8_t *base = ptr;
    stl(base, (uint32_t)v);
    stl(base + 4, (uint32_t)(v >> 32));
}
302 |
|
303 |
/* float access */
|
304 |
|
305 |
/* Load a little-endian 32 bit float via the integer loader; a union
   reinterprets the bits as a float. */
static inline float ldfl(void *ptr)
{
    union {
        uint32_t i;
        float f;
    } tmp;
    tmp.i = ldl(ptr);
    return tmp.f;
}
314 |
|
315 |
/* Load a little-endian 64 bit double via the integer loader; a union
   reinterprets the bits as a double. */
static inline double ldfq(void *ptr)
{
    union {
        uint64_t i;
        double d;
    } tmp;
    tmp.i = ldq(ptr);
    return tmp.d;
}
324 |
|
325 |
/* Store a 32 bit float little-endian via the integer store; a union
   extracts the raw bits. */
static inline void stfl(void *ptr, float v)
{
    union {
        uint32_t i;
        float f;
    } tmp;
    tmp.f = v;
    stl(ptr, tmp.i);
}
334 |
|
335 |
/* Store a 64 bit double little-endian via the integer store; a union
   extracts the raw bits. */
static inline void stfq(void *ptr, double v)
{
    union {
        uint64_t i;
        double d;
    } tmp;
    tmp.d = v;
    stq(ptr, tmp.i);
}
344 |
|
345 |
#else
|
346 |
|
347 |
/* Load an unsigned 16 bit value (little-endian host: direct access).
   NOTE(review): assumes the pointer is suitably aligned for uint16_t. */
static inline int lduw(void *ptr)
{
    const uint16_t *p = ptr;
    return *p;
}
351 |
|
352 |
/* Load a signed 16 bit value, sign-extended to int (little-endian
   host: direct access). */
static inline int ldsw(void *ptr)
{
    const int16_t *p = ptr;
    return *p;
}
356 |
|
357 |
/* Load a 32 bit value (little-endian host: direct access). */
static inline int ldl(void *ptr)
{
    const uint32_t *p = ptr;
    return *p;
}
361 |
|
362 |
/* Load a 64 bit value (little-endian host: direct access). */
static inline uint64_t ldq(void *ptr)
{
    const uint64_t *p = ptr;
    return *p;
}
366 |
|
367 |
/* Store the low 16 bits of v (little-endian host: direct access). */
static inline void stw(void *ptr, int v)
{
    uint16_t *dst = ptr;
    *dst = (uint16_t)v;
}
371 |
|
372 |
/* Store the low 32 bits of v (little-endian host: direct access). */
static inline void stl(void *ptr, int v)
{
    uint32_t *dst = ptr;
    *dst = (uint32_t)v;
}
376 |
|
377 |
/* Store a 64 bit value (little-endian host: direct access). */
static inline void stq(void *ptr, uint64_t v)
{
    uint64_t *dst = ptr;
    *dst = v;
}
381 |
|
382 |
/* float access */
|
383 |
|
384 |
/* Load a 32 bit float (little-endian host).
   The original *(float *)ptr cast is undefined behavior when the
   same memory is also accessed as integers (strict aliasing) and can
   fault on unaligned pointers; memcpy has identical observable
   behavior and is safe in both cases. */
static inline float ldfl(void *ptr)
{
    float v;
    memcpy(&v, ptr, sizeof(v));
    return v;
}
388 |
|
389 |
/* Load a 64 bit double (little-endian host).
   memcpy replaces the *(double *)ptr cast to avoid strict-aliasing
   undefined behavior and unaligned-access faults; the observable
   result is identical. */
static inline double ldfq(void *ptr)
{
    double v;
    memcpy(&v, ptr, sizeof(v));
    return v;
}
393 |
|
394 |
/* Store a 32 bit float (little-endian host).
   memcpy replaces the *(float *)ptr = v cast-store to avoid
   strict-aliasing undefined behavior and unaligned-access faults;
   the bytes written are identical. */
static inline void stfl(void *ptr, float v)
{
    memcpy(ptr, &v, sizeof(v));
}
398 |
|
399 |
/* Store a 64 bit double (little-endian host).
   memcpy replaces the *(double *)ptr = v cast-store to avoid
   strict-aliasing undefined behavior and unaligned-access faults;
   the bytes written are identical. */
static inline void stfq(void *ptr, double v)
{
    memcpy(ptr, &v, sizeof(v));
}
403 |
#endif
|
404 |
|
405 |
#ifndef IN_OP_I386
/* I/O port access callbacks.  NOTE(review): hidden when compiling
   with IN_OP_I386 defined — presumably the op_i386 translation unit
   provides/uses its own versions; confirm against that file. */
void cpu_x86_outb(CPUX86State *env, int addr, int val);
void cpu_x86_outw(CPUX86State *env, int addr, int val);
void cpu_x86_outl(CPUX86State *env, int addr, int val);
int cpu_x86_inb(CPUX86State *env, int addr);
int cpu_x86_inw(CPUX86State *env, int addr);
int cpu_x86_inl(CPUX86State *env, int addr);
#endif

/* CPU lifecycle: allocate, run, signal an interrupt, destroy */
CPUX86State *cpu_x86_init(void);
int cpu_x86_exec(CPUX86State *s);
void cpu_x86_interrupt(CPUX86State *s);
void cpu_x86_close(CPUX86State *s);

/* needed to load some predefined segment registers */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);

/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. non zero
   is returned if the signal was handled by the virtual CPU. */
struct siginfo;
int cpu_x86_signal_handler(int host_signum, struct siginfo *info,
                           void *puc);

/* used to debug */
#define X86_DUMP_FPU 0x0001 /* dump FPU state too */
#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */
void cpu_x86_dump_state(CPUX86State *env, FILE *f, int flags);

/* internal functions */

/* bit positions within the "flags" argument of cpu_x86_gen_code();
   they encode translation-relevant CPU mode bits (code/stack size,
   vm86, CPL, IOPL, trap flag, ...) */
#define GEN_FLAG_CODE32_SHIFT 0
#define GEN_FLAG_ADDSEG_SHIFT 1
#define GEN_FLAG_SS32_SHIFT 2
#define GEN_FLAG_VM_SHIFT 3
#define GEN_FLAG_ST_SHIFT 4
#define GEN_FLAG_CPL_SHIFT 7
#define GEN_FLAG_IOPL_SHIFT 9
#define GEN_FLAG_TF_SHIFT 11

/* translate guest code starting at pc_start into gen_code_buf;
   the generated size is returned through gen_code_size_ptr */
int cpu_x86_gen_code(uint8_t *gen_code_buf, int max_code_size,
                     int *gen_code_size_ptr,
                     uint8_t *pc_start, uint8_t *cs_base, int flags);
void cpu_x86_tblocks_init(void);
449 |
|
450 |
#endif /* CPU_I386_H */ |