cpu-i386.h @ bc8a22cc
/*
 * i386 virtual CPU header
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifndef CPU_I386_H
#define CPU_I386_H

#include "config.h"
#include <setjmp.h>

#define R_EAX 0
#define R_ECX 1
#define R_EDX 2
#define R_EBX 3
#define R_ESP 4
#define R_EBP 5
#define R_ESI 6
#define R_EDI 7

#define R_AL 0
#define R_CL 1
#define R_DL 2
#define R_BL 3
#define R_AH 4
#define R_CH 5
#define R_DH 6
#define R_BH 7

#define R_ES 0
#define R_CS 1
#define R_SS 2
#define R_DS 3
#define R_FS 4
#define R_GS 5

/* eflags masks */
#define CC_C    0x0001
#define CC_P    0x0004
#define CC_A    0x0010
#define CC_Z    0x0040
#define CC_S    0x0080
#define CC_O    0x0800

#define TF_MASK   0x00000100
#define IF_MASK   0x00000200
#define DF_MASK   0x00000400
#define IOPL_MASK 0x00003000
#define NT_MASK   0x00004000
#define RF_MASK   0x00010000
#define VM_MASK   0x00020000
#define AC_MASK   0x00040000
#define VIF_MASK  0x00080000
#define VIP_MASK  0x00100000
#define ID_MASK   0x00200000
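/* Example (sketch, not taken from this header): the masks above select
   individual bits of an eflags value, e.g.

       int intr_enabled = (eflags & IF_MASK) != 0;
       int iopl = (eflags & IOPL_MASK) >> 12;

   where 12 is the bit position of the IOPL field in eflags. */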
#define EXCP00_DIVZ     0
#define EXCP01_SSTP     1
#define EXCP02_NMI      2
#define EXCP03_INT3     3
#define EXCP04_INTO     4
#define EXCP05_BOUND    5
#define EXCP06_ILLOP    6
#define EXCP07_PREX     7
#define EXCP08_DBLE     8
#define EXCP09_XERR     9
#define EXCP0A_TSS      10
#define EXCP0B_NOSEG    11
#define EXCP0C_STACK    12
#define EXCP0D_GPF      13
#define EXCP0E_PAGE     14
#define EXCP10_COPR     16
#define EXCP11_ALGN     17
#define EXCP12_MCHK     18

#define EXCP_INTERRUPT  256 /* asynchronous interrupt */

enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */
    CC_OP_MUL,     /* modify all flags, C, O = (CC_SRC != 0) */

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,

    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,

    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,

    CC_OP_NB,
};
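/* Example (sketch): with this lazy scheme only cc_src, cc_dst and cc_op are
   updated by the translated code, and individual eflags bits are
   reconstructed on demand. For instance, after an operation recorded as
   CC_OP_SUBL, the Z and S bits could be recovered as

       int zf = (cc_dst == 0) ? CC_Z : 0;
       int sf = (cc_dst & 0x80000000) ? CC_S : 0;

   since cc_dst holds the result, as noted in the comments above. */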
#ifdef __i386__
#define USE_X86LDOUBLE
#endif

#ifdef USE_X86LDOUBLE
typedef long double CPU86_LDouble;
#else
typedef double CPU86_LDouble;
#endif

typedef struct SegmentCache {
    uint8_t *base;
    unsigned long limit;
    uint8_t seg_32bit;
} SegmentCache;

typedef struct SegmentDescriptorTable {
    uint8_t *base;
    unsigned long limit;
    /* this is the base returned when reading the register, so that the
       emulated program cannot modify the real base */
    unsigned long emu_base;
} SegmentDescriptorTable;

typedef struct CPUX86State {
    /* standard registers */
    uint32_t regs[8];
    uint32_t eip;
    uint32_t eflags; /* eflags register. During CPU emulation, CC
                        flags and DF are set to zero because they are
                        stored elsewhere */

    /* emulator internal eflags handling */
    uint32_t cc_src;
    uint32_t cc_dst;
    uint32_t cc_op;
    int32_t df; /* D flag: 1 if D = 0, -1 if D = 1 */

    /* FPU state */
    unsigned int fpstt; /* top of stack index */
    unsigned int fpus;
    unsigned int fpuc;
    uint8_t fptags[8];  /* 0 = valid, 1 = empty */
    CPU86_LDouble fpregs[8];

    /* emulator internal variables */
    CPU86_LDouble ft0;

    /* segments */
    uint32_t segs[6]; /* selector values */
    SegmentCache seg_cache[6]; /* info taken from LDT/GDT */
    SegmentDescriptorTable gdt;
    SegmentDescriptorTable ldt;
    SegmentDescriptorTable idt;

    /* exception/interrupt handling */
    jmp_buf jmp_env;
    int exception_index;
    int interrupt_request;

    /* user data */
    void *opaque;
} CPUX86State;
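/* Example (sketch): regs[] is indexed with the R_EAX..R_EDI constants and
   segs[] with R_ES..R_GS, so setting up a freshly created CPU might look
   like

       env->regs[R_ESP] = stack_top;
       env->regs[R_EAX] = 0;
       env->eip = entry_point;

   where env, stack_top and entry_point are placeholders, not names defined
   by this header. */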
/* all CPU memory accesses use these macros */
static inline int ldub(void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb(void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

#ifdef WORDS_BIGENDIAN

/* conservative code for little endian unaligned accesses */
static inline int lduw(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq(void *ptr)
{
    uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl(p);
    v2 = ldl(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl(p, (uint32_t)v);
    stl(p + 4, v >> 32);
}

/* float access */

static inline float ldfl(void *ptr)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.i = ldl(ptr);
    return u.f;
}

static inline double ldfq(void *ptr)
{
    union {
        double d;
        uint64_t i;
    } u;
    u.i = ldq(ptr);
    return u.d;
}

static inline void stfl(void *ptr, float v)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.f = v;
    stl(ptr, u.i);
}

static inline void stfq(void *ptr, double v)
{
    union {
        double d;
        uint64_t i;
    } u;
    u.d = v;
    stq(ptr, u.i);
}

#else

static inline int lduw(void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw(void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl(void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq(void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float ldfl(void *ptr)
{
    return *(float *)ptr;
}

static inline double ldfq(void *ptr)
{
    return *(double *)ptr;
}

static inline void stfl(void *ptr, float v)
{
    *(float *)ptr = v;
}

static inline void stfq(void *ptr, double v)
{
    *(double *)ptr = v;
}
#endif
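/* Example (sketch): whichever branch above gets compiled, the load/store
   helpers (ldub, lduw, ldl, ldq, stb, stw, stl, stq, ...) access guest data
   in x86 (little-endian) byte order:

       uint8_t buf[4];
       stl(buf, 0x12345678);      now buf[0] == 0x78 and buf[3] == 0x12
       int x = ldsw(buf);         sign-extended 16-bit load, x == 0x5678

   On big-endian hosts the WORDS_BIGENDIAN branch byte-swaps explicitly so
   the guest sees the same layout. */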
#ifndef IN_OP_I386
void cpu_x86_outb(int addr, int val);
void cpu_x86_outw(int addr, int val);
void cpu_x86_outl(int addr, int val);
int cpu_x86_inb(int addr);
int cpu_x86_inw(int addr);
int cpu_x86_inl(int addr);
#endif
CPUX86State *cpu_x86_init(void);
int cpu_x86_exec(CPUX86State *s);
void cpu_x86_interrupt(CPUX86State *s);
void cpu_x86_close(CPUX86State *s);

/* needed to load some predefined segment registers */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
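/* Example (sketch): a minimal caller might drive the emulator roughly like
   this (all selector and address values are placeholders):

       CPUX86State *env = cpu_x86_init();
       env->regs[R_ESP] = initial_stack;
       env->eip = initial_eip;
       cpu_x86_load_seg(env, R_CS, cs_sel);
       cpu_x86_load_seg(env, R_SS, ss_sel);
       for (;;) {
           int excp = cpu_x86_exec(env);
           handle_exception(env, excp);
       }

   cpu_x86_exec() returns when an exception such as EXCP0D_GPF or
   EXCP_INTERRUPT stops execution; handle_exception() stands for whatever
   the caller does with it and is not part of this header. */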
/* You can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. A non-zero
   value is returned if the signal was handled by the virtual CPU. */
struct siginfo;
int cpu_x86_signal_handler(int host_signum, struct siginfo *info,
                           void *puc);
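/* Example (sketch): on a Linux host this could be wired up through
   sigaction() with SA_SIGINFO, e.g.

       static void host_segv_handler(int signum, struct siginfo *info, void *puc)
       {
           if (!cpu_x86_signal_handler(signum, info, puc))
               abort();
       }

   registered for SIGSEGV and SIGBUS; host_segv_handler is a placeholder
   name, not something defined by this header. */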
/* internal functions */

#define GEN_FLAG_CODE32_SHIFT 0
#define GEN_FLAG_ADDSEG_SHIFT 1
#define GEN_FLAG_SS32_SHIFT   2
#define GEN_FLAG_VM_SHIFT     3
#define GEN_FLAG_ST_SHIFT     4
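/* Example (sketch): the flags argument of cpu_x86_gen_code() below packs the
   code generation mode bits using the shifts above, e.g.

       int flags = (code32 << GEN_FLAG_CODE32_SHIFT) |
                   (addseg << GEN_FLAG_ADDSEG_SHIFT) |
                   (ss32   << GEN_FLAG_SS32_SHIFT)   |
                   (vm86   << GEN_FLAG_VM_SHIFT);

   where code32, addseg, ss32 and vm86 are 0/1 values chosen by the caller;
   the names are placeholders, and GEN_FLAG_ST_SHIFT is combined the same
   way. */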
int cpu_x86_gen_code(uint8_t *gen_code_buf, int max_code_size,
                     int *gen_code_size_ptr,
                     uint8_t *pc_start, uint8_t *cs_base, int flags);
void cpu_x86_tblocks_init(void);

#endif /* CPU_I386_H */