Revision 14ce26e7

b/target-i386/cpu.h
 #ifndef CPU_I386_H
 #define CPU_I386_H
 
+#include "config.h"
+
+#ifdef TARGET_X86_64
+#define TARGET_LONG_BITS 64
+#else
 #define TARGET_LONG_BITS 32
+#endif
 
 /* target supports implicit self modifying code */
 #define TARGET_HAS_SMC
......
 #define DESC_G_MASK     (1 << 23)
 #define DESC_B_SHIFT    22
 #define DESC_B_MASK     (1 << DESC_B_SHIFT)
+#define DESC_L_SHIFT    21 /* x86_64 only : 64 bit code segment */
+#define DESC_L_MASK     (1 << DESC_L_SHIFT)
 #define DESC_AVL_MASK   (1 << 20)
 #define DESC_P_MASK     (1 << 15)
 #define DESC_DPL_SHIFT  13
......
 #define HF_EM_SHIFT         10
 #define HF_TS_SHIFT         11
 #define HF_IOPL_SHIFT       12 /* must be same as eflags */
+#define HF_LMA_SHIFT        14 /* only used on x86_64: long mode active */
+#define HF_CS64_SHIFT       15 /* only used on x86_64: 64 bit code segment */
 #define HF_VM_SHIFT         17 /* must be same as eflags */
 
 #define HF_CPL_MASK          (3 << HF_CPL_SHIFT)
......
 #define HF_MP_MASK           (1 << HF_MP_SHIFT)
 #define HF_EM_MASK           (1 << HF_EM_SHIFT)
 #define HF_TS_MASK           (1 << HF_TS_SHIFT)
+#define HF_LMA_MASK          (1 << HF_LMA_SHIFT)
+#define HF_CS64_MASK         (1 << HF_CS64_SHIFT)
 
 #define CR0_PE_MASK  (1 << 0)
 #define CR0_MP_MASK  (1 << 1)
......
 #define CR4_PSE_MASK  (1 << 4)
 #define CR4_PAE_MASK  (1 << 5)
 #define CR4_PGE_MASK  (1 << 7)
+#define CR4_PCE_MASK  (1 << 8)
+#define CR4_OSFXSR_MASK (1 << 9)
+#define CR4_OSXMMEXCPT_MASK  (1 << 10)
 
 #define PG_PRESENT_BIT	0
 #define PG_RW_BIT	1
......
 #define MSR_IA32_SYSENTER_ESP           0x175
 #define MSR_IA32_SYSENTER_EIP           0x176
 
+#define MSR_EFER                        0xc0000080
+
+#define MSR_EFER_SCE   (1 << 0)
+#define MSR_EFER_LME   (1 << 8)
+#define MSR_EFER_LMA   (1 << 10)
+#define MSR_EFER_NXE   (1 << 11)
+#define MSR_EFER_FFXSR (1 << 14)
+
+#define MSR_STAR                        0xc0000081
+#define MSR_LSTAR                       0xc0000082
+#define MSR_CSTAR                       0xc0000083
+#define MSR_FMASK                       0xc0000084
+#define MSR_FSBASE                      0xc0000100
+#define MSR_GSBASE                      0xc0000101
+#define MSR_KERNELGSBASE                0xc0000102
+
+/* cpuid_features bits */
+#define CPUID_FP87 (1 << 0)
+#define CPUID_VME  (1 << 1)
+#define CPUID_DE   (1 << 2)
+#define CPUID_PSE  (1 << 3)
+#define CPUID_TSC  (1 << 4)
+#define CPUID_MSR  (1 << 5)
+#define CPUID_PAE  (1 << 6)
+#define CPUID_MCE  (1 << 7)
+#define CPUID_CX8  (1 << 8)
+#define CPUID_APIC (1 << 9)
+#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
+#define CPUID_MTRR (1 << 12)
+#define CPUID_PGE  (1 << 13)
+#define CPUID_MCA  (1 << 14)
+#define CPUID_CMOV (1 << 15)
+/* ... */
+#define CPUID_MMX  (1 << 23)
+#define CPUID_FXSR (1 << 24)
+#define CPUID_SSE  (1 << 25)
+#define CPUID_SSE2 (1 << 26)
+
 #define EXCP00_DIVZ	0
 #define EXCP01_SSTP	1
 #define EXCP02_NMI	2
......
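Note on the new MSR block: EFER is the gateway to long mode. Software sets LME, and the CPU reports LMA once paging is enabled on top of it. A minimal sketch of that activation rule (the helper name is hypothetical and not part of the patch; CR0_PG_MASK is defined elsewhere in cpu.h, outside this diff):

/* Sketch only: the AMD64 long-mode activation rule these bits encode. */
static void example_update_lma(CPUX86State *env, uint32_t new_cr0)
{
    if ((env->efer & MSR_EFER_LME) && (new_cr0 & CR0_PG_MASK)) {
        env->efer |= MSR_EFER_LMA;      /* long mode becomes active */
        env->hflags |= HF_LMA_MASK;
    } else {
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~HF_LMA_MASK;
    }
}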
     CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
     CC_OP_MULW,
     CC_OP_MULL,
+    CC_OP_MULQ,
 
     CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
     CC_OP_ADDW,
     CC_OP_ADDL,
+    CC_OP_ADDQ,
 
     CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
     CC_OP_ADCW,
     CC_OP_ADCL,
+    CC_OP_ADCQ,
 
     CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
     CC_OP_SUBW,
     CC_OP_SUBL,
+    CC_OP_SUBQ,
 
     CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
     CC_OP_SBBW,
     CC_OP_SBBL,
+    CC_OP_SBBQ,
 
     CC_OP_LOGICB, /* modify all flags, CC_DST = res */
     CC_OP_LOGICW,
     CC_OP_LOGICL,
+    CC_OP_LOGICQ,
 
     CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
     CC_OP_INCW,
     CC_OP_INCL,
+    CC_OP_INCQ,
 
     CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
     CC_OP_DECW,
     CC_OP_DECL,
+    CC_OP_DECQ,
 
     CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
     CC_OP_SHLW,
     CC_OP_SHLL,
+    CC_OP_SHLQ,
 
     CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
     CC_OP_SARW,
     CC_OP_SARL,
+    CC_OP_SARQ,
 
     CC_OP_NB,
 };
......
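The enum above gains a 64-bit "Q" variant of every arithmetic op because condition codes are evaluated lazily from the saved operands, and that evaluation must happen at the operand width. An illustrative evaluator for one case (hypothetical name; QEMU's real evaluators are generated in the helper/ops template files, not shown here):

#include <stdint.h>

/* For CC_OP_ADDQ, following the comment above: CC_DST = result,
   CC_SRC = src1. A 64-bit add carried out iff the result wrapped
   below the first operand. */
static int example_compute_c_addq(uint64_t cc_dst, uint64_t cc_src)
{
    return cc_dst < cc_src;
}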
 
 typedef struct SegmentCache {
     uint32_t selector;
-    uint8_t *base;
+    target_ulong base;
     uint32_t limit;
     uint32_t flags;
 } SegmentCache;
 
+typedef struct {
+    union {
+        uint8_t b[16];
+        uint16_t w[8];
+        uint32_t l[4];
+        uint64_t q[2];
+    } u;
+} XMMReg;
+
+#ifdef TARGET_X86_64
+#define CPU_NB_REGS 16
+#else
+#define CPU_NB_REGS 8
+#endif
+
 typedef struct CPUX86State {
+#if TARGET_LONG_BITS > HOST_LONG_BITS
+    /* temporaries if we cannot store them in host registers */
+    target_ulong t0, t1, t2;
+#endif
+
     /* standard registers */
-    uint32_t regs[8];
-    uint32_t eip;
-    uint32_t eflags; /* eflags register. During CPU emulation, CC
+    target_ulong regs[CPU_NB_REGS];
+    target_ulong eip;
+    target_ulong eflags; /* eflags register. During CPU emulation, CC
                         flags and DF are set to zero because they are
                         stored elsewhere */
 
     /* emulator internal eflags handling */
-    uint32_t cc_src;
-    uint32_t cc_dst;
+    target_ulong cc_src;
+    target_ulong cc_dst;
     uint32_t cc_op;
     int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
     uint32_t hflags; /* hidden flags, see HF_xxx constants */
......
     SegmentCache gdt; /* only base and limit are used */
     SegmentCache idt; /* only base and limit are used */
     
+    XMMReg xmm_regs[CPU_NB_REGS];
+    XMMReg xmm_t0;
+
     /* sysenter registers */
     uint32_t sysenter_cs;
     uint32_t sysenter_esp;
     uint32_t sysenter_eip;
+#ifdef TARGET_X86_64
+    target_ulong efer;
+    target_ulong star;
+    target_ulong lstar;
+    target_ulong cstar;
+    target_ulong fmask;
+    target_ulong kernelgsbase;
+#endif
 
     /* temporary data for USE_CODE_COPY mode */
 #ifdef USE_CODE_COPY
......
     int exception_is_int;
     int exception_next_eip;
     struct TranslationBlock *current_tb; /* currently executing TB */
-    uint32_t cr[5]; /* NOTE: cr1 is unused */
-    uint32_t dr[8]; /* debug registers */
+    target_ulong cr[5]; /* NOTE: cr1 is unused */
+    target_ulong dr[8]; /* debug registers */
     int interrupt_request; 
     int user_mode_only; /* user mode only simulation */
 
346 440
       context) */
347 441
    unsigned long mem_write_pc; /* host pc at which the memory was
348 442
                                   written */
349
    unsigned long mem_write_vaddr; /* target virtual addr at which the
350
                                      memory was written */
443
    target_ulong mem_write_vaddr; /* target virtual addr at which the
444
                                     memory was written */
351 445
    /* 0 = kernel, 1 = user */
352 446
    CPUTLBEntry tlb_read[2][CPU_TLB_SIZE];
353 447
    CPUTLBEntry tlb_write[2][CPU_TLB_SIZE];
354 448
    
355 449
    /* from this point: preserved by CPU reset */
356 450
    /* ice debug support */
357
    uint32_t breakpoints[MAX_BREAKPOINTS];
451
    target_ulong breakpoints[MAX_BREAKPOINTS];
358 452
    int nb_breakpoints;
359 453
    int singlestep_enabled;
360 454

  
455
    /* processor features (e.g. for CPUID insn) */
456
    uint32_t cpuid_vendor1;
457
    uint32_t cpuid_vendor2;
458
    uint32_t cpuid_vendor3;
459
    uint32_t cpuid_version;
460
    uint32_t cpuid_features;
461

  
462
    /* in order to simplify APIC support, we leave this pointer to the
463
       user */
464
    struct APICState *apic_state;
361 465
    /* user data */
362 466
    void *opaque;
363 467
} CPUX86State;
......
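Most of this file's changes swap uint32_t for target_ulong so the same structures serve 32- and 64-bit guests. The typedef itself is not in this diff (it comes from cpu-defs.h); a sketch of the usual shape, assuming TARGET_LONG_BITS as defined at the top of cpu.h:

#include <inttypes.h>

/* Sketch only: QEMU's real definition lives in cpu-defs.h. */
#if TARGET_LONG_BITS == 64
typedef uint64_t target_ulong;
#define TARGET_FMT_lx "%016" PRIx64   /* used by the fprintf changes below */
#else
typedef uint32_t target_ulong;
#define TARGET_FMT_lx "%08x"
#endif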
    cache: it synchronizes the hflags with the segment cache values */
 static inline void cpu_x86_load_seg_cache(CPUX86State *env, 
                                           int seg_reg, unsigned int selector,
-                                          uint8_t *base, unsigned int limit, 
+                                          uint32_t base, unsigned int limit, 
                                           unsigned int flags)
 {
     SegmentCache *sc;
......
     sc->flags = flags;
 
     /* update the hidden flags */
-    new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
-        >> (DESC_B_SHIFT - HF_CS32_SHIFT);
-    new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
-        >> (DESC_B_SHIFT - HF_SS32_SHIFT);
-    if (!(env->cr[0] & CR0_PE_MASK) || 
-        (env->eflags & VM_MASK) ||
-        !(new_hflags & HF_CS32_MASK)) {
-        /* XXX: try to avoid this test. The problem comes from the
-           fact that in real mode or vm86 mode we only modify the
-           'base' and 'selector' fields of the segment cache to go
-           faster. A solution may be to force addseg to one in
-           translate-i386.c. */
-        new_hflags |= HF_ADDSEG_MASK;
-    } else {
-        new_hflags |= (((unsigned long)env->segs[R_DS].base | 
-                        (unsigned long)env->segs[R_ES].base |
-                        (unsigned long)env->segs[R_SS].base) != 0) << 
-            HF_ADDSEG_SHIFT;
+    {
+        if (seg_reg == R_CS) {
+#ifdef TARGET_X86_64
+            if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
+                /* long mode */
+                env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
+                env->hflags &= ~(HF_ADDSEG_MASK);
+            } else 
+#endif
+            {
+                /* legacy / compatibility case */
+                new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
+                    >> (DESC_B_SHIFT - HF_CS32_SHIFT);
+                env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
+                    new_hflags;
+            }
+        }
+        new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
+            >> (DESC_B_SHIFT - HF_SS32_SHIFT);
+        if (env->hflags & HF_CS64_MASK) {
+            /* zero base assumed for DS, ES and SS in long mode */
+        } else if (!(env->cr[0] & CR0_PE_MASK) || 
+            (env->eflags & VM_MASK) ||
+            !(new_hflags & HF_CS32_MASK)) {
+            /* XXX: try to avoid this test. The problem comes from the
+               fact that in real mode or vm86 mode we only modify the
+               'base' and 'selector' fields of the segment cache to go
+               faster. A solution may be to force addseg to one in
+               translate-i386.c. */
+            new_hflags |= HF_ADDSEG_MASK;
+        } else {
+            new_hflags |= (((unsigned long)env->segs[R_DS].base | 
+                            (unsigned long)env->segs[R_ES].base |
+                            (unsigned long)env->segs[R_SS].base) != 0) << 
+                HF_ADDSEG_SHIFT;
+        }
+        env->hflags = (env->hflags & 
+                       ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
     }
-    env->hflags = (env->hflags & 
-                   ~(HF_CS32_MASK | HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
 }
 
 /* wrapper, just in case memory mappings must be changed */
......
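The rewritten hidden-flags logic means a single segment load can switch the translator between 16-, 32- and 64-bit code. A usage sketch with made-up selector and flag values (not taken from the patch):

/* Flat 64-bit code segment: L bit set, B bit clear, present, code. */
static void example_load_long_mode_cs(CPUX86State *env)
{
    cpu_x86_load_seg_cache(env, R_CS, 0x08, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_A_MASK | DESC_L_MASK);
    /* if HF_LMA_MASK was already set in env->hflags, the logic above
       now also sets HF_CS32_MASK, HF_SS32_MASK and HF_CS64_MASK */
}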
 
 uint64_t cpu_get_tsc(CPUX86State *env);
 
+void cpu_set_apic_base(CPUX86State *env, uint64_t val);
+uint64_t cpu_get_apic_base(CPUX86State *env);
+
 /* will be suppressed */
 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
 
b/target-i386/exec.h
 #include "config.h"
 #include "dyngen-exec.h"
 
+/* XXX: factorize this mess */
+#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
+#define HOST_LONG_BITS 64
+#else
+#define HOST_LONG_BITS 32
+#endif
+
+#ifdef TARGET_X86_64
+#define TARGET_LONG_BITS 64
+#else
+#define TARGET_LONG_BITS 32
+#endif
+
 /* at least 4 register variables are defined */
 register struct CPUX86State *env asm(AREG0);
+
+/* XXX: use 64 bit regs if HOST_LONG_BITS == 64 */
+#if TARGET_LONG_BITS == 32
+
 register uint32_t T0 asm(AREG1);
 register uint32_t T1 asm(AREG2);
 register uint32_t T2 asm(AREG3);
 
-#define A0 T2
-
 /* if more registers are available, we define some registers too */
 #ifdef AREG4
 register uint32_t EAX asm(AREG4);
......
 #define reg_EDI
 #endif
 
+#else
+
+/* no registers can be used */
+#define T0 (env->t0)
+#define T1 (env->t1)
+#define T2 (env->t2)
+
+#endif
+
+#define A0 T2
+
 extern FILE *logfile;
 extern int loglevel;
 
......
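The #else branch above is what makes a 64-bit guest buildable on a 32-bit host: once a target word no longer fits a host register, the temporaries fall back to the t0/t1/t2 fields added to CPUX86State in cpu.h. A simplified illustration of that trade-off (this collapses the two conditions the patch actually uses into one):

/* Sketch: T0 only gets a host register when the guest word fits in one. */
#if TARGET_LONG_BITS > HOST_LONG_BITS
#define T0 (env->t0)                   /* spilled to CPUX86State */
#else
register target_ulong T0 asm(AREG1);   /* lives in a host register */
#endif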
 void helper_movl_drN_T0(int reg);
 void helper_invlpg(unsigned int addr);
 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
-void cpu_x86_update_cr3(CPUX86State *env, uint32_t new_cr3);
+void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
 void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr);
-int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, 
+int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, 
                              int is_write, int is_user, int is_softmmu);
-void tlb_fill(unsigned long addr, int is_write, int is_user, 
+void tlb_fill(target_ulong addr, int is_write, int is_user, 
               void *retaddr);
 void __hidden cpu_lock(void);
 void __hidden cpu_unlock(void);
 void do_interrupt(int intno, int is_int, int error_code, 
-                  unsigned int next_eip, int is_hw);
+                  target_ulong next_eip, int is_hw);
 void do_interrupt_user(int intno, int is_int, int error_code, 
-                       unsigned int next_eip);
+                       target_ulong next_eip);
 void raise_interrupt(int intno, int is_int, int error_code, 
                      unsigned int next_eip);
 void raise_exception_err(int exception_index, int error_code);
 void raise_exception(int exception_index);
 void __hidden cpu_loop_exit(void);
-void helper_fsave(uint8_t *ptr, int data32);
-void helper_frstor(uint8_t *ptr, int data32);
 
 void OPPROTO op_movl_eflags_T0(void);
 void OPPROTO op_movl_T0_eflags(void);
......
                      unsigned int next_eip);
 void raise_exception_err(int exception_index, int error_code);
 void raise_exception(int exception_index);
-void helper_divl_EAX_T0(uint32_t eip);
-void helper_idivl_EAX_T0(uint32_t eip);
+void helper_divl_EAX_T0(void);
+void helper_idivl_EAX_T0(void);
+void helper_mulq_EAX_T0(void);
+void helper_imulq_EAX_T0(void);
+void helper_imulq_T0_T1(void);
+void helper_divq_EAX_T0(void);
+void helper_idivq_EAX_T0(void);
 void helper_cmpxchg8b(void);
 void helper_cpuid(void);
 void helper_enter_level(int level, int data32);
 void helper_sysenter(void);
 void helper_sysexit(void);
+void helper_syscall(void);
+void helper_sysret(int dflag);
 void helper_rdtsc(void);
 void helper_rdmsr(void);
 void helper_wrmsr(void);
252 283
#define stl(p, v) stl_data(p, v)
253 284
#define stq(p, v) stq_data(p, v)
254 285

  
255
static inline double ldfq(void *ptr)
286
static inline double ldfq(target_ulong ptr)
256 287
{
257 288
    union {
258 289
        double d;
......
262 293
    return u.d;
263 294
}
264 295

  
265
static inline void stfq(void *ptr, double v)
296
static inline void stfq(target_ulong ptr, double v)
266 297
{
267 298
    union {
268 299
        double d;
......
272 303
    stq(ptr, u.i);
273 304
}
274 305

  
275
static inline float ldfl(void *ptr)
306
static inline float ldfl(target_ulong ptr)
276 307
{
277 308
    union {
278 309
        float f;
......
282 313
    return u.f;
283 314
}
284 315

  
285
static inline void stfl(void *ptr, float v)
316
static inline void stfl(target_ulong ptr, float v)
286 317
{
287 318
    union {
288 319
        float f;
......
411 442
}
412 443

  
413 444
#ifndef USE_X86LDOUBLE
414
static inline CPU86_LDouble helper_fldt(uint8_t *ptr)
445
static inline CPU86_LDouble helper_fldt(target_ulong ptr)
415 446
{
416 447
    CPU86_LDoubleU temp;
417 448
    int upper, e;
......
451 482

  
452 483
#ifdef CONFIG_USER_ONLY
453 484

  
454
static inline CPU86_LDouble helper_fldt(uint8_t *ptr)
485
static inline CPU86_LDouble helper_fldt(target_ulong ptr)
455 486
{
456 487
    return *(CPU86_LDouble *)ptr;
457 488
}
458 489

  
459
static inline void helper_fstt(CPU86_LDouble f, uint8_t *ptr)
490
static inline void helper_fstt(CPU86_LDouble f, target_ulong ptr)
460 491
{
461 492
    *(CPU86_LDouble *)ptr = f;
462 493
}
......
465 496

  
466 497
/* we use memory access macros */
467 498

  
468
static inline CPU86_LDouble helper_fldt(uint8_t *ptr)
499
static inline CPU86_LDouble helper_fldt(target_ulong ptr)
469 500
{
470 501
    CPU86_LDoubleU temp;
471 502

  
......
474 505
    return temp.d;
475 506
}
476 507

  
477
static inline void helper_fstt(CPU86_LDouble f, uint8_t *ptr)
508
static inline void helper_fstt(CPU86_LDouble f, target_ulong ptr)
478 509
{
479 510
    CPU86_LDoubleU temp;
480 511
    
......
522 553
void helper_fsin(void);
523 554
void helper_fcos(void);
524 555
void helper_fxam_ST0(void);
525
void helper_fstenv(uint8_t *ptr, int data32);
526
void helper_fldenv(uint8_t *ptr, int data32);
527
void helper_fsave(uint8_t *ptr, int data32);
528
void helper_frstor(uint8_t *ptr, int data32);
556
void helper_fstenv(target_ulong ptr, int data32);
557
void helper_fldenv(target_ulong ptr, int data32);
558
void helper_fsave(target_ulong ptr, int data32);
559
void helper_frstor(target_ulong ptr, int data32);
560
void helper_fxsave(target_ulong ptr, int data64);
561
void helper_fxrstor(target_ulong ptr, int data64);
529 562
void restore_native_fp_state(CPUState *env);
530 563
void save_native_fp_state(CPUState *env);
531 564

  
b/target-i386/helper.c
119 119
{
120 120
    SegmentCache *dt;
121 121
    int index;
122
    uint8_t *ptr;
122
    target_ulong ptr;
123 123

  
124 124
    if (selector & 0x4)
125 125
        dt = &env->ldt;
......
143 143
    return limit;
144 144
}
145 145

  
146
static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
146
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
147 147
{
148
    return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
148
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
149 149
}
150 150

  
151 151
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
......
160 160
{
161 161
    selector &= 0xffff;
162 162
    cpu_x86_load_seg_cache(env, seg, selector, 
163
                           (uint8_t *)(selector << 4), 0xffff, 0);
163
                           (selector << 4), 0xffff, 0);
164 164
}
165 165

  
166 166
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr, 
......
258 258
                       uint32_t next_eip)
259 259
{
260 260
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
261
    uint8_t *tss_base;
261
    target_ulong tss_base;
262 262
    uint32_t new_regs[8], new_segs[6];
263 263
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
264 264
    uint32_t old_eflags, eflags_mask;
265 265
    SegmentCache *dt;
266 266
    int index;
267
    uint8_t *ptr;
267
    target_ulong ptr;
268 268

  
269 269
    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
270 270
#ifdef DEBUG_PCALL
......
345 345
    
346 346
    /* clear busy bit (it is restartable) */
347 347
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
348
        uint8_t *ptr;
348
        target_ulong ptr;
349 349
        uint32_t e2;
350 350
        ptr = env->gdt.base + (env->tr.selector & ~7);
351 351
        e2 = ldl_kernel(ptr + 4);
......
397 397

  
398 398
    /* set busy bit */
399 399
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
400
        uint8_t *ptr;
400
        target_ulong ptr;
401 401
        uint32_t e2;
402 402
        ptr = env->gdt.base + (tss_selector & ~7);
403 403
        e2 = ldl_kernel(ptr + 4);
......
445 445
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
446 446
        /* first just selectors as the rest may trigger exceptions */
447 447
        for(i = 0; i < 6; i++)
448
            cpu_x86_load_seg_cache(env, i, new_segs[i], NULL, 0, 0);
448
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
449 449
    }
450 450
    
451 451
    env->ldt.selector = new_ldt & ~4;
452
    env->ldt.base = NULL;
452
    env->ldt.base = 0;
453 453
    env->ldt.limit = 0;
454 454
    env->ldt.flags = 0;
455 455

  
......
573 573

  
574 574
#define POPL(ssp, sp, sp_mask, val)\
575 575
{\
576
    val = ldl_kernel((ssp) + (sp & (sp_mask)));\
576
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
577 577
    sp += 4;\
578 578
}
579 579

  
......
582 582
                                   unsigned int next_eip, int is_hw)
583 583
{
584 584
    SegmentCache *dt;
585
    uint8_t *ptr, *ssp;
585
    target_ulong ptr, ssp;
586 586
    int type, dpl, selector, ss_dpl, cpl, sp_mask;
587 587
    int has_error_code, new_stack, shift;
588 588
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
......
703 703
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
704 704
        new_stack = 0; /* avoid warning */
705 705
        sp_mask = 0; /* avoid warning */
706
        ssp = NULL; /* avoid warning */
706
        ssp = 0; /* avoid warning */
707 707
        esp = 0; /* avoid warning */
708 708
    }
709 709

  
......
754 754
    
755 755
    if (new_stack) {
756 756
        if (env->eflags & VM_MASK) {
757
            cpu_x86_load_seg_cache(env, R_ES, 0, NULL, 0, 0);
758
            cpu_x86_load_seg_cache(env, R_DS, 0, NULL, 0, 0);
759
            cpu_x86_load_seg_cache(env, R_FS, 0, NULL, 0, 0);
760
            cpu_x86_load_seg_cache(env, R_GS, 0, NULL, 0, 0);
757
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
758
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
759
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
760
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
761 761
        }
762 762
        ss = (ss & ~3) | dpl;
763 763
        cpu_x86_load_seg_cache(env, R_SS, ss, 
......
780 780
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
781 781
}
782 782

  
783
#ifdef TARGET_X86_64
784

  
785
#define PUSHQ(sp, val)\
786
{\
787
    sp -= 8;\
788
    stq_kernel(sp, (val));\
789
}
790

  
791
#define POPQ(sp, val)\
792
{\
793
    val = ldq_kernel(sp);\
794
    sp += 8;\
795
}
796

  
797
static inline target_ulong get_rsp_from_tss(int level)
798
{
799
    int index;
800
    
801
#if 0
802
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n", 
803
           env->tr.base, env->tr.limit);
804
#endif
805

  
806
    if (!(env->tr.flags & DESC_P_MASK))
807
        cpu_abort(env, "invalid tss");
808
    index = 8 * level + 4;
809
    if ((index + 7) > env->tr.limit)
810
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
811
    return ldq_kernel(env->tr.base + index);
812
}
813

  
814
/* 64 bit interrupt */
815
static void do_interrupt64(int intno, int is_int, int error_code,
816
                           target_ulong next_eip, int is_hw)
817
{
818
    SegmentCache *dt;
819
    target_ulong ptr;
820
    int type, dpl, selector, cpl, ist;
821
    int has_error_code, new_stack;
822
    uint32_t e1, e2, e3, ss;
823
    target_ulong old_eip, esp, offset;
824

  
825
    has_error_code = 0;
826
    if (!is_int && !is_hw) {
827
        switch(intno) {
828
        case 8:
829
        case 10:
830
        case 11:
831
        case 12:
832
        case 13:
833
        case 14:
834
        case 17:
835
            has_error_code = 1;
836
            break;
837
        }
838
    }
839
    if (is_int)
840
        old_eip = next_eip;
841
    else
842
        old_eip = env->eip;
843

  
844
    dt = &env->idt;
845
    if (intno * 16 + 15 > dt->limit)
846
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
847
    ptr = dt->base + intno * 16;
848
    e1 = ldl_kernel(ptr);
849
    e2 = ldl_kernel(ptr + 4);
850
    e3 = ldl_kernel(ptr + 8);
851
    /* check gate type */
852
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
853
    switch(type) {
854
    case 14: /* 386 interrupt gate */
855
    case 15: /* 386 trap gate */
856
        break;
857
    default:
858
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
859
        break;
860
    }
861
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
862
    cpl = env->hflags & HF_CPL_MASK;
863
    /* check privledge if software int */
864
    if (is_int && dpl < cpl)
865
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
866
    /* check valid bit */
867
    if (!(e2 & DESC_P_MASK))
868
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
869
    selector = e1 >> 16;
870
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
871
    ist = e2 & 7;
872
    if ((selector & 0xfffc) == 0)
873
        raise_exception_err(EXCP0D_GPF, 0);
874

  
875
    if (load_segment(&e1, &e2, selector) != 0)
876
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
877
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
878
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
879
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
880
    if (dpl > cpl)
881
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
882
    if (!(e2 & DESC_P_MASK))
883
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
884
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
885
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
886
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
887
        /* to inner priviledge */
888
        if (ist != 0)
889
            esp = get_rsp_from_tss(ist + 3);
890
        else
891
            esp = get_rsp_from_tss(dpl);
892
        ss = 0;
893
        new_stack = 1;
894
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
895
        /* to same priviledge */
896
        if (env->eflags & VM_MASK)
897
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
898
        new_stack = 0;
899
        esp = ESP & ~0xf; /* align stack */
900
        dpl = cpl;
901
    } else {
902
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
903
        new_stack = 0; /* avoid warning */
904
        esp = 0; /* avoid warning */
905
    }
906

  
907
    PUSHQ(esp, env->segs[R_SS].selector);
908
    PUSHQ(esp, ESP);
909
    PUSHQ(esp, compute_eflags());
910
    PUSHQ(esp, env->segs[R_CS].selector);
911
    PUSHQ(esp, old_eip);
912
    if (has_error_code) {
913
        PUSHQ(esp, error_code);
914
    }
915
    
916
    if (new_stack) {
917
        ss = 0 | dpl;
918
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
919
    }
920
    ESP = esp;
921

  
922
    selector = (selector & ~3) | dpl;
923
    cpu_x86_load_seg_cache(env, R_CS, selector, 
924
                   get_seg_base(e1, e2),
925
                   get_seg_limit(e1, e2),
926
                   e2);
927
    cpu_x86_set_cpl(env, dpl);
928
    env->eip = offset;
929

  
930
    /* interrupt gate clear IF mask */
931
    if ((type & 1) == 0) {
932
        env->eflags &= ~IF_MASK;
933
    }
934
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
935
}
936

  
937
void helper_syscall(void)
938
{
939
    int selector;
940

  
941
    if (!(env->efer & MSR_EFER_SCE)) {
942
        raise_exception_err(EXCP06_ILLOP, 0);
943
    }
944
    selector = (env->star >> 32) & 0xffff;
945
    if (env->hflags & HF_LMA_MASK) {
946
        ECX = env->eip;
947
        env->regs[11] = compute_eflags();
948

  
949
        cpu_x86_set_cpl(env, 0);
950
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, 
951
                           0, 0xffffffff, 
952
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
953
                               DESC_S_MASK |
954
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
955
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, 
956
                               0, 0xffffffff,
957
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
958
                               DESC_S_MASK |
959
                               DESC_W_MASK | DESC_A_MASK);
960
        env->eflags &= ~env->fmask;
961
        if (env->hflags & HF_CS64_MASK)
962
            env->eip = env->lstar;
963
        else
964
            env->eip = env->cstar;
965
    } else {
966
        ECX = (uint32_t)env->eip;
967
        
968
        cpu_x86_set_cpl(env, 0);
969
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, 
970
                           0, 0xffffffff, 
971
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
972
                               DESC_S_MASK |
973
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
974
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, 
975
                               0, 0xffffffff,
976
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
977
                               DESC_S_MASK |
978
                               DESC_W_MASK | DESC_A_MASK);
979
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
980
        env->eip = (uint32_t)env->star;
981
    }
982
}
983

  
984
void helper_sysret(int dflag)
985
{
986
    int cpl, selector;
987

  
988
    cpl = env->hflags & HF_CPL_MASK;
989
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
990
        raise_exception_err(EXCP0D_GPF, 0);
991
    }
992
    selector = (env->star >> 48) & 0xffff;
993
    if (env->hflags & HF_LMA_MASK) {
994
        if (dflag == 2) {
995
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3, 
996
                                   0, 0xffffffff, 
997
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
998
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
999
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 
1000
                                   DESC_L_MASK);
1001
            env->eip = ECX;
1002
        } else {
1003
            cpu_x86_load_seg_cache(env, R_CS, selector | 3, 
1004
                                   0, 0xffffffff, 
1005
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1006
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1007
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1008
            env->eip = (uint32_t)ECX;
1009
        }
1010
        cpu_x86_load_seg_cache(env, R_SS, selector + 8, 
1011
                               0, 0xffffffff,
1012
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1013
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1014
                               DESC_W_MASK | DESC_A_MASK);
1015
        load_eflags((uint32_t)(env->regs[11]), 0xffffffff);
1016
        cpu_x86_set_cpl(env, 3);
1017
    } else {
1018
        cpu_x86_load_seg_cache(env, R_CS, selector | 3, 
1019
                               0, 0xffffffff, 
1020
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1021
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1022
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1023
        env->eip = (uint32_t)ECX;
1024
        cpu_x86_load_seg_cache(env, R_SS, selector + 8, 
1025
                               0, 0xffffffff,
1026
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1027
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1028
                               DESC_W_MASK | DESC_A_MASK);
1029
        env->eflags |= IF_MASK;
1030
        cpu_x86_set_cpl(env, 3);
1031
    }
1032
}
1033
#endif
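helper_syscall and helper_sysret derive every code and stack selector from MSR_STAR, so the fast path needs no descriptor-table walk. An illustrative STAR layout (the selector values are examples only, not from the patch):

uint64_t example_star = ((uint64_t)0x1b << 48)   /* SYSRET selector base */
                      | ((uint64_t)0x08 << 32);  /* SYSCALL kernel CS    */
/* helper_syscall loads CS = 0x08 and SS = 0x10 (CS + 8); helper_sysret
   loads CS = 0x1b|3 (32-bit return) or 0x2b|3 (64-bit, base + 16) and
   SS = 0x23 (base + 8). */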
+
 /* real mode interrupt */
 static void do_interrupt_real(int intno, int is_int, int error_code,
                               unsigned int next_eip)
 {
     SegmentCache *dt;
-    uint8_t *ptr, *ssp;
+    target_ulong ptr, ssp;
     int selector;
     uint32_t offset, esp;
     uint32_t old_cs, old_eip;
......
     ESP = (ESP & ~0xffff) | (esp & 0xffff);
     env->eip = offset;
     env->segs[R_CS].selector = selector;
-    env->segs[R_CS].base = (uint8_t *)(selector << 4);
+    env->segs[R_CS].base = (selector << 4);
     env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
 }
 
 /* fake user mode interrupt */
 void do_interrupt_user(int intno, int is_int, int error_code, 
-                       unsigned int next_eip)
+                       target_ulong next_eip)
 {
     SegmentCache *dt;
-    uint8_t *ptr;
+    target_ulong ptr;
     int dpl, cpl;
     uint32_t e2;
 
......
  * instruction. It is only relevant if is_int is TRUE.  
  */
 void do_interrupt(int intno, int is_int, int error_code, 
-                  unsigned int next_eip, int is_hw)
+                  target_ulong next_eip, int is_hw)
 {
 #ifdef DEBUG_PCALL
     if (loglevel & (CPU_LOG_PCALL | CPU_LOG_INT)) {
         if ((env->cr[0] & CR0_PE_MASK)) {
             static int count;
-            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:%08x pc=%08x SP=%04x:%08x",
+            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
             if (intno == 0x0e) {
-                fprintf(logfile, " CR2=%08x", env->cr[2]);
+                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
             } else {
-                fprintf(logfile, " EAX=%08x", EAX);
+                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
             }
             fprintf(logfile, "\n");
-#if 0
             cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
+#if 0
             {
                 int i;
                 uint8_t *ptr;
......
     }
 #endif
     if (env->cr[0] & CR0_PE_MASK) {
-        do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
+#if TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK) {
+            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
+        } else
+#endif
+        {
+            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
+        }
     } else {
         do_interrupt_real(intno, is_int, error_code, next_eip);
     }
......
 #ifdef BUGGY_GCC_DIV64
 /* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
    call it from another function */
-uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
+uint32_t div32(uint32_t *q_ptr, uint64_t num, uint32_t den)
 {
     *q_ptr = num / den;
     return num % den;
 }
 
-int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
+int32_t idiv32(int32_t *q_ptr, int64_t num, int32_t den)
 {
     *q_ptr = num / den;
     return num % den;
 }
 #endif
 
-void helper_divl_EAX_T0(uint32_t eip)
+void helper_divl_EAX_T0(void)
 {
     unsigned int den, q, r;
     uint64_t num;
......
     num = EAX | ((uint64_t)EDX << 32);
     den = T0;
     if (den == 0) {
-        EIP = eip;
         raise_exception(EXCP00_DIVZ);
     }
 #ifdef BUGGY_GCC_DIV64
-    r = div64(&q, num, den);
+    r = div32(&q, num, den);
 #else
     q = (num / den);
     r = (num % den);
 #endif
-    EAX = q;
-    EDX = r;
+    EAX = (uint32_t)q;
+    EDX = (uint32_t)r;
 }
 
-void helper_idivl_EAX_T0(uint32_t eip)
+void helper_idivl_EAX_T0(void)
 {
     int den, q, r;
     int64_t num;
......
     num = EAX | ((uint64_t)EDX << 32);
     den = T0;
     if (den == 0) {
-        EIP = eip;
         raise_exception(EXCP00_DIVZ);
     }
 #ifdef BUGGY_GCC_DIV64
-    r = idiv64(&q, num, den);
+    r = idiv32(&q, num, den);
 #else
     q = (num / den);
     r = (num % den);
 #endif
-    EAX = q;
-    EDX = r;
+    EAX = (uint32_t)q;
+    EDX = (uint32_t)r;
 }
 
 void helper_cmpxchg8b(void)
......
     int eflags;
 
     eflags = cc_table[CC_OP].compute_all();
-    d = ldq((uint8_t *)A0);
+    d = ldq(A0);
     if (d == (((uint64_t)EDX << 32) | EAX)) {
-        stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
+        stq(A0, ((uint64_t)ECX << 32) | EBX);
         eflags |= CC_Z;
     } else {
         EDX = d >> 32;
......
     CC_SRC = eflags;
 }
 
-#define CPUID_FP87 (1 << 0)
-#define CPUID_VME  (1 << 1)
-#define CPUID_DE   (1 << 2)
-#define CPUID_PSE  (1 << 3)
-#define CPUID_TSC  (1 << 4)
-#define CPUID_MSR  (1 << 5)
-#define CPUID_PAE  (1 << 6)
-#define CPUID_MCE  (1 << 7)
-#define CPUID_CX8  (1 << 8)
-#define CPUID_APIC (1 << 9)
-#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
-#define CPUID_MTRR (1 << 12)
-#define CPUID_PGE  (1 << 13)
-#define CPUID_MCA  (1 << 14)
-#define CPUID_CMOV (1 << 15)
-/* ... */
-#define CPUID_MMX  (1 << 23)
-#define CPUID_FXSR (1 << 24)
-#define CPUID_SSE  (1 << 25)
-#define CPUID_SSE2 (1 << 26)
-
 void helper_cpuid(void)
 {
-    switch(EAX) {
+    switch((uint32_t)EAX) {
     case 0:
         EAX = 2; /* max EAX index supported */
-        EBX = 0x756e6547;
-        ECX = 0x6c65746e;
-        EDX = 0x49656e69;
+        EBX = env->cpuid_vendor1;
+        EDX = env->cpuid_vendor2;
+        ECX = env->cpuid_vendor3;
         break;
     case 1:
-        {
-            int family, model, stepping;
-            /* EAX = 1 info */
-#if 0
-            /* pentium 75-200 */
-            family = 5;
-            model = 2;
-            stepping = 11;
-#else
-            /* pentium pro */
-            family = 6;
-            model = 1;
-            stepping = 3;
-#endif
-            EAX = (family << 8) | (model << 4) | stepping;
-            EBX = 0;
-            ECX = 0;
-            EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
-                CPUID_TSC | CPUID_MSR | CPUID_MCE |
-                CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
-        }
+        EAX = env->cpuid_version;
+        EBX = 0;
+        ECX = 0;
+        EDX = env->cpuid_features;
         break;
     default:
         /* cache info: needed for Pentium Pro compatibility */
......
         ECX = 0;
         EDX = 0;
         break;
+#ifdef TARGET_X86_64
+    case 0x80000000:
+        EAX = 0x80000008;
+        EBX = env->cpuid_vendor1;
+        EDX = env->cpuid_vendor2;
+        ECX = env->cpuid_vendor3;
+        break;
+    case 0x80000001:
+        EAX = env->cpuid_features;
+        EBX = 0;
+        ECX = 0;
+        /* long mode + syscall/sysret features */
+        EDX = (env->cpuid_features & 0x0183F3FF) | (1 << 29) | (1 << 11);
+        break;
+    case 0x80000008:
+        /* virtual & phys address size in low 2 bytes. */
+        EAX = 0x00003028;
+        EBX = 0;
+        ECX = 0;
+        EDX = 0;
+        break;
+#endif
     }
 }
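helper_cpuid now reads the vendor string from the three cpuid_vendor words instead of hard-coded constants. They pack the 12-byte vendor string little-endian and are returned in EBX/EDX/ECX order; for example (illustrative):

#include <stdint.h>

/* The old constants spelled "GenuineIntel"; "Genu" packs as: */
uint32_t example_vendor1 = 'G' | ('e' << 8) | ('n' << 16) | ('u' << 24);
/* == 0x756e6547, exactly the constant removed from the EBX assignment */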
 
 void helper_enter_level(int level, int data32)
 {
-    uint8_t *ssp;
+    target_ulong ssp;
     uint32_t esp_mask, esp, ebp;
 
     esp_mask = get_sp_mask(env->segs[R_SS].flags);
......
     int selector;
     SegmentCache *dt;
     uint32_t e1, e2;
-    int index;
-    uint8_t *ptr;
+    int index, entry_limit;
+    target_ulong ptr;
     
     selector = T0 & 0xffff;
     if ((selector & 0xfffc) == 0) {
         /* XXX: NULL selector case: invalid LDT */
-        env->ldt.base = NULL;
+        env->ldt.base = 0;
         env->ldt.limit = 0;
     } else {
         if (selector & 0x4)
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         dt = &env->gdt;
         index = selector & ~7;
-        if ((index + 7) > dt->limit)
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK)
+            entry_limit = 15;
+        else
+#endif
+            entry_limit = 7;
+        if ((index + entry_limit) > dt->limit)
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         ptr = dt->base + index;
         e1 = ldl_kernel(ptr);
......
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         if (!(e2 & DESC_P_MASK))
             raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
-        load_seg_cache_raw_dt(&env->ldt, e1, e2);
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK) {
+            uint32_t e3;
+            e3 = ldl_kernel(ptr + 8);
+            load_seg_cache_raw_dt(&env->ldt, e1, e2);
+            env->ldt.base |= (target_ulong)e3 << 32;
+        } else
+#endif
+        {
+            load_seg_cache_raw_dt(&env->ldt, e1, e2);
+        }
     }
     env->ldt.selector = selector;
 }
......
     int selector;
     SegmentCache *dt;
     uint32_t e1, e2;
-    int index, type;
-    uint8_t *ptr;
+    int index, type, entry_limit;
+    target_ulong ptr;
     
     selector = T0 & 0xffff;
     if ((selector & 0xfffc) == 0) {
-        /* NULL selector case: invalid LDT */
-        env->tr.base = NULL;
+        /* NULL selector case: invalid TR */
+        env->tr.base = 0;
         env->tr.limit = 0;
         env->tr.flags = 0;
     } else {
......
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         dt = &env->gdt;
         index = selector & ~7;
-        if ((index + 7) > dt->limit)
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK)
+            entry_limit = 15;
+        else
+#endif
+            entry_limit = 7;
+        if ((index + entry_limit) > dt->limit)
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         ptr = dt->base + index;
         e1 = ldl_kernel(ptr);
......
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         if (!(e2 & DESC_P_MASK))
             raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
-        load_seg_cache_raw_dt(&env->tr, e1, e2);
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK) {
+            uint32_t e3;
+            e3 = ldl_kernel(ptr + 8);
+            load_seg_cache_raw_dt(&env->tr, e1, e2);
+            env->tr.base |= (target_ulong)e3 << 32;
+        } else 
+#endif
+        {
+            load_seg_cache_raw_dt(&env->tr, e1, e2);
+        }
         e2 |= DESC_TSS_BUSY_MASK;
         stl_kernel(ptr + 4, e2);
     }
......
     int cpl, dpl, rpl;
     SegmentCache *dt;
     int index;
-    uint8_t *ptr;
+    target_ulong ptr;
 
     selector &= 0xffff;
     if ((selector & 0xfffc) == 0) {
         /* null selector case */
         if (seg_reg == R_SS)
             raise_exception_err(EXCP0D_GPF, 0);
-        cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
+        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
     } else {
         
         if (selector & 0x4)
......
         ptr = dt->base + index;
         e1 = ldl_kernel(ptr);
         e2 = ldl_kernel(ptr + 4);
 
         if (!(e2 & DESC_S_MASK))
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         rpl = selector & 3;
......
 /* protected mode jump */
 void helper_ljmp_protected_T0_T1(int next_eip)
 {
-    int new_cs, new_eip, gate_cs, type;
+    int new_cs, gate_cs, type;
     uint32_t e1, e2, cpl, dpl, rpl, limit;
-
+    target_ulong new_eip;
+    
     new_cs = T0;
     new_eip = T1;
     if ((new_cs & 0xfffc) == 0)
......
             if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != 
                  (DESC_S_MASK | DESC_CS_MASK)))
                 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
             if (((e2 & DESC_C_MASK) && (dpl > cpl)) || 
                 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
             if (!(e2 & DESC_P_MASK))
......
 {
     int new_cs, new_eip;
     uint32_t esp, esp_mask;
-    uint8_t *ssp;
+    target_ulong ssp;
 
     new_cs = T0;
     new_eip = T1;
......
     ESP = (ESP & ~esp_mask) | (esp & esp_mask);
     env->eip = new_eip;
     env->segs[R_CS].selector = new_cs;
-    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
+    env->segs[R_CS].base = (new_cs << 4);
 }
 
 /* protected mode call */
......
     uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
     uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
     uint32_t val, limit, old_sp_mask;
-    uint8_t *ssp, *old_ssp;
+    target_ulong ssp, old_ssp;
     
     new_cs = T0;
     new_eip = T1;
......
             get_ss_esp_from_tss(&ss, &sp, dpl);
 #ifdef DEBUG_PCALL
             if (loglevel & CPU_LOG_PCALL)
-                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=%x\n", 
+                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n", 
                         ss, sp, param_count, ESP);
 #endif
             if ((ss & 0xfffc) == 0)
......
 void helper_iret_real(int shift)
 {
     uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
-    uint8_t *ssp;
+    target_ulong ssp;
     int eflags_mask;
 
     sp_mask = 0xffff; /* XXXX: use SS segment size ? */
......
     if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
         /* data or non conforming code segment */
         if (dpl < cpl) {
-            cpu_x86_load_seg_cache(env, seg_reg, 0, NULL, 0, 0);
+            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
         }
     }
 }
......
 /* protected mode iret */
 static inline void helper_ret_protected(int shift, int is_iret, int addend)
 {
-    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss, sp_mask;
+    uint32_t new_cs, new_eflags, new_ss;
     uint32_t new_es, new_ds, new_fs, new_gs;
     uint32_t e1, e2, ss_e1, ss_e2;
     int cpl, dpl, rpl, eflags_mask, iopl;
-    uint8_t *ssp;
+    target_ulong ssp, sp, new_eip, new_esp, sp_mask;
     
-    sp_mask = get_sp_mask(env->segs[R_SS].flags);
+#ifdef TARGET_X86_64
+    if (shift == 2)
+        sp_mask = -1;
+    else
+#endif
+        sp_mask = get_sp_mask(env->segs[R_SS].flags);
     sp = ESP;
     ssp = env->segs[R_SS].base;
     new_eflags = 0; /* avoid warning */
+#ifdef TARGET_X86_64
+    if (shift == 2) {
+        POPQ(sp, new_eip);
+        POPQ(sp, new_cs);
+        new_cs &= 0xffff;
+        if (is_iret) {
+            POPQ(sp, new_eflags);
+        }
+    } else
+#endif
     if (shift == 1) {
         /* 32 bits */
         POPL(ssp, sp, sp_mask, new_eip);
......
     }
 #ifdef DEBUG_PCALL
     if (loglevel & CPU_LOG_PCALL) {
-        fprintf(logfile, "lret new %04x:%08x s=%d addend=0x%x\n",
+        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                 new_cs, new_eip, shift, addend);
         cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
     }
......
         raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
     
     sp += addend;
-    if (rpl == cpl) {
+    if (rpl == cpl && !(env->hflags & HF_CS64_MASK)) {
         /* return to same privilege level */
         cpu_x86_load_seg_cache(env, R_CS, new_cs, 
                        get_seg_base(e1, e2),
......
                        e2);
     } else {
         /* return to different privilege level */
+#ifdef TARGET_X86_64
+        if (shift == 2) {
+            POPQ(sp, new_esp);
+            POPQ(sp, new_ss);
+            new_ss &= 0xffff;
+        } else
+#endif
         if (shift == 1) {
             /* 32 bits */
             POPL(ssp, sp, sp_mask, new_esp);
......
         }
 #ifdef DEBUG_PCALL
         if (loglevel & CPU_LOG_PCALL) {
-            fprintf(logfile, "new ss:esp=%04x:%08x\n",
+            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                     new_ss, new_esp);
         }
 #endif
-        
-        if ((new_ss & 3) != rpl)
-            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
-        if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
-            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
-        if (!(ss_e2 & DESC_S_MASK) ||
-            (ss_e2 & DESC_CS_MASK) ||
-            !(ss_e2 & DESC_W_MASK))
-            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
-        dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
-        if (dpl != rpl)
-            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
-        if (!(ss_e2 & DESC_P_MASK))
... This diff was truncated because it exceeds the maximum size that can be displayed.
