Statistics
| Branch: | Revision:

root / helper-i386.c @ d05e66d2

History | View | Annotate | Download (46.4 kB)

1
/*
2
 *  i386 helpers
3
 * 
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "exec-i386.h"
21

    
22
/* parity_table[b] holds CC_P when byte b contains an even number of set
   bits, 0 otherwise: the x86 PF flag is the even parity of the low 8
   bits of a result. */
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
56

    
57
/* modulo 17 table: RCL/RCR on a 16-bit operand rotate through a
   17-bit value (operand + CF), so the shift count is reduced mod 17 */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7, 
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
64

    
65
/* modulo 9 table: RCL/RCR on an 8-bit operand rotate through a
   9-bit value (operand + CF), so the shift count is reduced mod 9 */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7, 
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5, 
    6, 7, 8, 0, 1, 2, 3, 4,
};
72

    
73
/* Constants for the FPU "load constant" instructions (FLDZ, FLD1,
   FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T).
   NOTE(review): presumably indexed by the low bits of the opcode in the
   translator -- confirm against the opcode dispatch code. */
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
83
    
84
/* thread support */
85

    
86
/* single global lock serializing helper execution across CPU threads */
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
87

    
88
/* Acquire the global CPU lock (see global_cpu_lock). */
void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}
92

    
93
/* Release the global CPU lock (see global_cpu_lock). */
void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
97

    
98
/* Abort the currently executing translated code and return to the
   main CPU loop via longjmp on env->jmp_env.  Which CPU registers live
   in host registers is build-configuration dependent (reg_* defines),
   so each one is spilled back to env->regs[] explicitly. */
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp would not restore them */
#ifdef reg_EAX
    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_ECX
    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_EBX
    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ESP
    env->regs[R_ESP] = ESP;
#endif
#ifdef reg_EBP
    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESI
    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
    env->regs[R_EDI] = EDI;
#endif
    longjmp(env->jmp_env, 1);
}
128

    
129
/* Read the inner-level stack pointer (SS:ESP) for privilege level DPL
   from the current TSS (env->tr).  Raises #TS if the required slot
   lies beyond the TSS limit; aborts on a non-present or non-TSS TR. */
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr, 
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;
    
#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    /* Only "available" TSS types (1, 9) pass; the busy types (3, 11)
       abort.  This is consistent with helper_ltr_T0, which caches the
       descriptor flags BEFORE setting the busy bit in memory. */
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    /* shift = 0 for a 286 TSS (16-bit slots), 1 for a 386 TSS */
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw(env->tr.base + index);
        *ss_ptr = lduw(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl(env->tr.base + index);
        *ss_ptr = lduw(env->tr.base + index + 4);
    }
}
163

    
164
/* return non zero if error */
165
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
166
                               int selector)
167
{
168
    SegmentCache *dt;
169
    int index;
170
    uint8_t *ptr;
171

    
172
    if (selector & 0x4)
173
        dt = &env->ldt;
174
    else
175
        dt = &env->gdt;
176
    index = selector & ~7;
177
    if ((index + 7) > dt->limit)
178
        return -1;
179
    ptr = dt->base + index;
180
    *e1_ptr = ldl(ptr);
181
    *e2_ptr = ldl(ptr + 4);
182
    return 0;
183
}
184
                                     
185

    
186
/* Protected mode interrupt/exception entry.
   intno: vector number; is_int: non-zero for the INT instruction;
   error_code: pushed when the exception defines one; next_eip: EIP
   after the INT instruction (only used when is_int); is_hw: non-zero
   for an external (hardware) interrupt.
   Parses the IDT gate, validates the target code segment, optionally
   switches to the inner-level stack from the TSS, and pushes the
   return frame. */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int type, dpl, selector, ss_dpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
    uint32_t old_cs, old_ss, old_esp, old_eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl(ptr);
    e2 = ldl(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        cpu_abort(env, "task gate not supported");
        break;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    /* check privilege if software int */
    if (is_int && dpl < env->cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    /* target CS:EIP comes from the gate descriptor */
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    /* e1/e2 now become the TARGET code segment descriptor */
    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > env->cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < env->cpl) {
        /* to inner privilege: fetch the new SS:ESP from the TSS and
           validate the stack segment descriptor */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == env->cpl) {
        /* to same privilege */
        new_stack = 0;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
    }

    /* shift = 0: 16-bit gate pushes, shift = 1: 32-bit gate pushes */
    shift = type >> 3;
    has_error_code = 0;
    if (!is_int && !is_hw) {
        /* exception vectors that push an error code */
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    /* size of the frame about to be pushed: eflags+cs+eip (+ss+esp on
       a stack switch, + error code, + the 4 vm86 segment selectors) */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;

    /* XXX: check that enough room is available */
    if (new_stack) {
        old_esp = ESP;
        old_ss = env->segs[R_SS].selector;
        load_seg(R_SS, ss, env->eip);
    } else {
        old_esp = 0;
        old_ss = 0;
        esp = ESP;
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    load_seg(R_CS, selector, env->eip);
    env->eip = offset;
    ESP = esp - push_size;
    /* the pushes below walk ssp downwards from the pre-push top */
    ssp = env->segs[R_SS].base + esp;
    if (shift == 1) {
        int old_eflags;
        if (env->eflags & VM_MASK) {
            ssp -= 4;
            stl(ssp, env->segs[R_GS].selector);
            ssp -= 4;
            stl(ssp, env->segs[R_FS].selector);
            ssp -= 4;
            stl(ssp, env->segs[R_DS].selector);
            ssp -= 4;
            stl(ssp, env->segs[R_ES].selector);
        }
        if (new_stack) {
            ssp -= 4;
            stl(ssp, old_ss);
            ssp -= 4;
            stl(ssp, old_esp);
        }
        ssp -= 4;
        old_eflags = compute_eflags();
        stl(ssp, old_eflags);
        ssp -= 4;
        stl(ssp, old_cs);
        ssp -= 4;
        stl(ssp, old_eip);
        if (has_error_code) {
            ssp -= 4;
            stl(ssp, error_code);
        }
    } else {
        if (new_stack) {
            ssp -= 2;
            stw(ssp, old_ss);
            ssp -= 2;
            stw(ssp, old_esp);
        }
        ssp -= 2;
        stw(ssp, compute_eflags());
        ssp -= 2;
        stw(ssp, old_cs);
        ssp -= 2;
        stw(ssp, old_eip);
        if (has_error_code) {
            ssp -= 2;
            stw(ssp, error_code);
        }
    }
    
    /* interrupt gates (even types) clear IF; trap gates keep it */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
359

    
360
/* Real mode interrupt entry: read CS:IP from the 4-byte IVT slot,
   push FLAGS/CS/IP on the 16-bit stack and jump.
   NOTE(review): the limit-fault error code uses intno * 8 + 2 like the
   protected mode path even though real mode IDT entries are 4 bytes --
   confirm intended. */
static void do_interrupt_real(int intno, int is_int, int error_code,
                                 unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw(ptr);
    selector = lduw(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    /* for INT n the return address is the next instruction */
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    esp -= 2;
    stw(ssp + (esp & 0xffff), compute_eflags());
    esp -= 2;
    stw(ssp + (esp & 0xffff), old_cs);
    esp -= 2;
    stw(ssp + (esp & 0xffff), old_eip);
    
    /* update processor state: only the low 16 bits of ESP move */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (uint8_t *)(selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
398

    
399
/* Fake user mode interrupt for user-space-only emulation: only the
   gate DPL privilege check is performed; the actual transfer is left
   to the host signal machinery. */
void do_interrupt_user(int intno, int is_int, int error_code, 
                       unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr;
    int dpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    /* only the second descriptor word (holding the DPL) is needed */
    e2 = ldl(ptr + 4);
    
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    /* check privilege if software int */
    if (is_int && dpl < env->cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
423

    
424
/*
425
 * Begin excution of an interruption. is_int is TRUE if coming from
426
 * the int instruction. next_eip is the EIP value AFTER the interrupt
427
 * instruction. It is only relevant if is_int is TRUE.  
428
 */
429
void do_interrupt(int intno, int is_int, int error_code, 
430
                  unsigned int next_eip, int is_hw)
431
{
432
    if (env->cr[0] & CR0_PE_MASK) {
433
        do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
434
    } else {
435
        do_interrupt_real(intno, is_int, error_code, next_eip);
436
    }
437
}
438

    
439
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.  
 */
void raise_interrupt(int intno, int is_int, int error_code, 
                     unsigned int next_eip)
{
    /* record the pending exception in the CPU state, then longjmp
       back to the main loop (this call does not return) */
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = next_eip;
    cpu_loop_exit();
}
454

    
455
/* shortcuts to generate exceptions */

/* Raise a CPU exception that carries an error code (does not return). */
void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
460

    
461
/* Raise a CPU exception without an error code (does not return). */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
465

    
466
#ifdef BUGGY_GCC_DIV64
467
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
468
   call it from another function */
469
/* Unsigned 64/32 division helper: store the (truncated) 32-bit
   quotient through q_ptr and return the remainder. */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    uint64_t quot = num / den;

    *q_ptr = (uint32_t)quot;
    /* num - quot*den is exactly num % den */
    return (uint32_t)(num - quot * (uint64_t)den);
}
474

    
475
/* Signed 64/32 division helper: store the (truncated) 32-bit quotient
   through q_ptr and return the remainder (C truncating semantics). */
int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
{
    int64_t quot = num / den;
    int64_t rem = num % den;

    *q_ptr = (int32_t)quot;
    return (int32_t)rem;
}
480
#endif
481

    
482
void helper_divl_EAX_T0(uint32_t eip)
483
{
484
    unsigned int den, q, r;
485
    uint64_t num;
486
    
487
    num = EAX | ((uint64_t)EDX << 32);
488
    den = T0;
489
    if (den == 0) {
490
        EIP = eip;
491
        raise_exception(EXCP00_DIVZ);
492
    }
493
#ifdef BUGGY_GCC_DIV64
494
    r = div64(&q, num, den);
495
#else
496
    q = (num / den);
497
    r = (num % den);
498
#endif
499
    EAX = q;
500
    EDX = r;
501
}
502

    
503
void helper_idivl_EAX_T0(uint32_t eip)
504
{
505
    int den, q, r;
506
    int64_t num;
507
    
508
    num = EAX | ((uint64_t)EDX << 32);
509
    den = T0;
510
    if (den == 0) {
511
        EIP = eip;
512
        raise_exception(EXCP00_DIVZ);
513
    }
514
#ifdef BUGGY_GCC_DIV64
515
    r = idiv64(&q, num, den);
516
#else
517
    q = (num / den);
518
    r = (num % den);
519
#endif
520
    EAX = q;
521
    EDX = r;
522
}
523

    
524
void helper_cmpxchg8b(void)
525
{
526
    uint64_t d;
527
    int eflags;
528

    
529
    eflags = cc_table[CC_OP].compute_all();
530
    d = ldq((uint8_t *)A0);
531
    if (d == (((uint64_t)EDX << 32) | EAX)) {
532
        stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
533
        eflags |= CC_Z;
534
    } else {
535
        EDX = d >> 32;
536
        EAX = d;
537
        eflags &= ~CC_Z;
538
    }
539
    CC_SRC = eflags;
540
}
541

    
542
/* We simulate a pre-MMX pentium as in valgrind */
/* CPUID.1:EDX feature flag bit positions */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
/* ... */
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)
563

    
564
void helper_cpuid(void)
565
{
566
    if (EAX == 0) {
567
        EAX = 1; /* max EAX index supported */
568
        EBX = 0x756e6547;
569
        ECX = 0x6c65746e;
570
        EDX = 0x49656e69;
571
    } else if (EAX == 1) {
572
        int family, model, stepping;
573
        /* EAX = 1 info */
574
#if 0
575
        /* pentium 75-200 */
576
        family = 5;
577
        model = 2;
578
        stepping = 11;
579
#else
580
        /* pentium pro */
581
        family = 6;
582
        model = 1;
583
        stepping = 3;
584
#endif
585
        EAX = (family << 8) | (model << 4) | stepping;
586
        EBX = 0;
587
        ECX = 0;
588
        EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
589
            CPUID_TSC | CPUID_MSR | CPUID_MCE |
590
            CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
591
    }
592
}
593

    
594
/* Decode the base, limit and flags of a segment descriptor (raw words
   e1/e2) into the segment cache SC.  With the granularity bit set the
   limit is in 4K pages, so it is scaled and the low 12 bits filled. */
static inline void load_seg_cache(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    uint32_t limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    sc->base = (void *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    sc->limit = limit;
    sc->flags = e2;
}
602

    
603
/* LLDT: load the LDT register from the selector in T0.  The selector
   must reference a present LDT descriptor (type 2) in the GDT; a null
   selector installs an empty LDT. */
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index;
    uint8_t *ptr;
    
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = NULL;
        env->ldt.limit = 0;
    } else {
        /* the LDT descriptor must live in the GDT (TI bit clear) */
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl(ptr);
        e2 = ldl(ptr + 4);
        /* must be a system descriptor of type 2 (LDT) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache(&env->ldt, e1, e2);
    }
    env->ldt.selector = selector;
}
634

    
635
/* LTR: load the task register from the selector in T0 and mark the
   descriptor busy in memory.  A null selector clears TR. */
void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type;
    uint8_t *ptr;
    
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid LDT */
        env->tr.base = NULL;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        /* the TSS descriptor must live in the GDT (TI bit clear) */
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl(ptr);
        e2 = ldl(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        /* NOTE(review): accepted types are 2 and 9, but type 2 is an
           LDT descriptor and type 1 (available 286 TSS) is rejected --
           hardware LTR accepts types 1 and 9; confirm against the
           Intel SDM. */
        if ((e2 & DESC_S_MASK) || 
            (type != 2 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        /* cache the descriptor BEFORE setting the busy bit; the cached
           flags therefore keep the "available" type (see
           get_ss_esp_from_tss) */
        load_seg_cache(&env->tr, e1, e2);
        e2 |= 0x00000200; /* set the busy bit */
        stl(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
671

    
672
/* Load segment register SEG_REG with SELECTOR, performing the
   protected mode descriptor checks.  cur_eip is restored into EIP
   before any fault so the exception points at the faulting
   instruction.  Only valid in protected mode outside VM86. */
void load_seg(int seg_reg, int selector, unsigned int cur_eip)
{
    SegmentCache *sc;
    uint32_t e1, e2;
    
    sc = &env->segs[seg_reg];
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS) {
            /* SS may never be null */
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, 0);
        } else {
            /* XXX: each access should trigger an exception */
            sc->base = NULL;
            sc->limit = 0;
            sc->flags = 0;
        }
    } else {
        if (load_segment(&e1, &e2, selector) != 0) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }
        /* must be a data segment or a readable code segment */
        if (!(e2 & DESC_S_MASK) ||
            (e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }

        if (seg_reg == R_SS) {
            /* SS must be a writable data segment */
            if ((e2 & (DESC_CS_MASK | DESC_W_MASK)) == 0) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        } else {
            /* a code segment loaded into a data register must be
               readable */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            EIP = cur_eip;
            /* a not-present stack segment raises #SS, others #NP */
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }
        load_seg_cache(sc, e1, e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", 
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
    if (seg_reg == R_CS) {
        /* loading CS also changes the current privilege level */
        cpu_x86_set_cpl(env, selector & 3);
    }
    sc->selector = selector;
}
731

    
732
/* protected mode jump */
733
void helper_ljmp_protected_T0_T1(void)
734
{
735
    int new_cs, new_eip;
736
    SegmentCache sc1;
737
    uint32_t e1, e2, cpl, dpl, rpl;
738

    
739
    new_cs = T0;
740
    new_eip = T1;
741
    if ((new_cs & 0xfffc) == 0)
742
        raise_exception_err(EXCP0D_GPF, 0);
743
    if (load_segment(&e1, &e2, new_cs) != 0)
744
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
745
    cpl = env->cpl;
746
    if (e2 & DESC_S_MASK) {
747
        if (!(e2 & DESC_CS_MASK))
748
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
749
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
750
        if (e2 & DESC_CS_MASK) {
751
            /* conforming code segment */
752
            if (dpl > cpl)
753
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
754
        } else {
755
            /* non conforming code segment */
756
            rpl = new_cs & 3;
757
            if (rpl > cpl)
758
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
759
            if (dpl != cpl)
760
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
761
        }
762
        if (!(e2 & DESC_P_MASK))
763
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
764
        load_seg_cache(&sc1, e1, e2);
765
        if (new_eip > sc1.limit)
766
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
767
        env->segs[R_CS].base = sc1.base;
768
        env->segs[R_CS].limit = sc1.limit;
769
        env->segs[R_CS].flags = sc1.flags;
770
        env->segs[R_CS].selector = (new_cs & 0xfffc) | cpl;
771
        EIP = new_eip;
772
    } else {
773
        cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x", 
774
                  new_cs, new_eip);
775
    }
776
}
777

    
778
/* real mode call */
779
void helper_lcall_real_T0_T1(int shift, int next_eip)
780
{
781
    int new_cs, new_eip;
782
    uint32_t esp, esp_mask;
783
    uint8_t *ssp;
784

    
785
    new_cs = T0;
786
    new_eip = T1;
787
    esp = ESP;
788
    esp_mask = 0xffffffff;
789
    if (!(env->segs[R_SS].flags & DESC_B_MASK))
790
        esp_mask = 0xffff;
791
    ssp = env->segs[R_SS].base;
792
    if (shift) {
793
        esp -= 4;
794
        stl(ssp + (esp & esp_mask), env->segs[R_CS].selector);
795
        esp -= 4;
796
        stl(ssp + (esp & esp_mask), next_eip);
797
    } else {
798
        esp -= 2;
799
        stw(ssp + (esp & esp_mask), env->segs[R_CS].selector);
800
        esp -= 2;
801
        stw(ssp + (esp & esp_mask), next_eip);
802
    }
803

    
804
    if (!(env->segs[R_SS].flags & DESC_B_MASK))
805
        ESP = (ESP & ~0xffff) | (esp & 0xffff);
806
    else
807
        ESP = esp;
808
    env->eip = new_eip;
809
    env->segs[R_CS].selector = new_cs;
810
    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
811
}
812

    
813
/* protected mode call */
814
void helper_lcall_protected_T0_T1(int shift, int next_eip)
815
{
816
    int new_cs, new_eip;
817
    SegmentCache sc1;
818
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
819
    uint32_t ss, ss_e1, ss_e2, push_size, sp, type, ss_dpl;
820
    uint32_t old_ss, old_esp, val, i;
821
    uint8_t *ssp, *old_ssp;
822
    
823
    new_cs = T0;
824
    new_eip = T1;
825
    if ((new_cs & 0xfffc) == 0)
826
        raise_exception_err(EXCP0D_GPF, 0);
827
    if (load_segment(&e1, &e2, new_cs) != 0)
828
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
829
    cpl = env->cpl;
830
    if (e2 & DESC_S_MASK) {
831
        if (!(e2 & DESC_CS_MASK))
832
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
833
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
834
        if (e2 & DESC_CS_MASK) {
835
            /* conforming code segment */
836
            if (dpl > cpl)
837
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
838
        } else {
839
            /* non conforming code segment */
840
            rpl = new_cs & 3;
841
            if (rpl > cpl)
842
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
843
            if (dpl != cpl)
844
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
845
        }
846
        if (!(e2 & DESC_P_MASK))
847
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
848

    
849
        sp = ESP;
850
        if (!(env->segs[R_SS].flags & DESC_B_MASK))
851
            sp &= 0xffff;
852
        ssp = env->segs[R_SS].base + sp;
853
        if (shift) {
854
            ssp -= 4;
855
            stl(ssp, env->segs[R_CS].selector);
856
            ssp -= 4;
857
            stl(ssp, next_eip);
858
        } else {
859
            ssp -= 2;
860
            stw(ssp, env->segs[R_CS].selector);
861
            ssp -= 2;
862
            stw(ssp, next_eip);
863
        }
864
        sp -= (4 << shift);
865
        
866
        load_seg_cache(&sc1, e1, e2);
867
        if (new_eip > sc1.limit)
868
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
869
        /* from this point, not restartable */
870
        if (!(env->segs[R_SS].flags & DESC_B_MASK))
871
            ESP = (ESP & 0xffff0000) | (sp & 0xffff);
872
        else
873
            ESP = sp;
874
        env->segs[R_CS].base = sc1.base;
875
        env->segs[R_CS].limit = sc1.limit;
876
        env->segs[R_CS].flags = sc1.flags;
877
        env->segs[R_CS].selector = (new_cs & 0xfffc) | cpl;
878
        EIP = new_eip;
879
    } else {
880
        /* check gate type */
881
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
882
        switch(type) {
883
        case 1: /* available 286 TSS */
884
        case 9: /* available 386 TSS */
885
        case 5: /* task gate */
886
            cpu_abort(env, "task gate not supported");
887
            break;
888
        case 4: /* 286 call gate */
889
        case 12: /* 386 call gate */
890
            break;
891
        default:
892
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
893
            break;
894
        }
895
        shift = type >> 3;
896

    
897
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
898
        rpl = new_cs & 3;
899
        if (dpl < cpl || dpl < rpl)
900
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
901
        /* check valid bit */
902
        if (!(e2 & DESC_P_MASK))
903
            raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
904
        selector = e1 >> 16;
905
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
906
        if ((selector & 0xfffc) == 0)
907
            raise_exception_err(EXCP0D_GPF, 0);
908

    
909
        if (load_segment(&e1, &e2, selector) != 0)
910
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
911
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
912
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
913
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
914
        if (dpl > cpl)
915
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
916
        if (!(e2 & DESC_P_MASK))
917
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
918

    
919
        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
920
            /* to inner priviledge */
921
            get_ss_esp_from_tss(&ss, &sp, dpl);
922
            if ((ss & 0xfffc) == 0)
923
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
924
            if ((ss & 3) != dpl)
925
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
926
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
927
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
928
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
929
            if (ss_dpl != dpl)
930
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
931
            if (!(ss_e2 & DESC_S_MASK) ||
932
                (ss_e2 & DESC_CS_MASK) ||
933
                !(ss_e2 & DESC_W_MASK))
934
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
935
            if (!(ss_e2 & DESC_P_MASK))
936
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
937
            
938
            param_count = e2 & 0x1f;
939
            push_size = ((param_count * 2) + 8) << shift;
940

    
941
            old_esp = ESP;
942
            old_ss = env->segs[R_SS].selector;
943
            if (!(env->segs[R_SS].flags & DESC_B_MASK))
944
                old_esp &= 0xffff;
945
            old_ssp = env->segs[R_SS].base + old_esp;
946
            
947
            /* XXX: from this point not restartable */
948
            load_seg(R_SS, ss, env->eip);
949

    
950
            if (!(env->segs[R_SS].flags & DESC_B_MASK))
951
                sp &= 0xffff;
952
            ssp = env->segs[R_SS].base + sp;
953
            if (shift) {
954
                ssp -= 4;
955
                stl(ssp, old_ss);
956
                ssp -= 4;
957
                stl(ssp, old_esp);
958
                ssp -= 4 * param_count;
959
                for(i = 0; i < param_count; i++) {
960
                    val = ldl(old_ssp + i * 4);
961
                    stl(ssp + i * 4, val);
962
                }
963
            } else {
964
                ssp -= 2;
965
                stw(ssp, old_ss);
966
                ssp -= 2;
967
                stw(ssp, old_esp);
968
                ssp -= 2 * param_count;
969
                for(i = 0; i < param_count; i++) {
970
                    val = lduw(old_ssp + i * 2);
971
                    stw(ssp + i * 2, val);
972
                }
973
            }
974
        } else {
975
            /* to same priviledge */
976
            if (!(env->segs[R_SS].flags & DESC_B_MASK))
977
                sp &= 0xffff;
978
            ssp = env->segs[R_SS].base + sp;
979
            push_size = (4 << shift);
980
        }
981

    
982
        if (shift) {
983
            ssp -= 4;
984
            stl(ssp, env->segs[R_CS].selector);
985
            ssp -= 4;
986
            stl(ssp, next_eip);
987
        } else {
988
            ssp -= 2;
989
            stw(ssp, env->segs[R_CS].selector);
990
            ssp -= 2;
991
            stw(ssp, next_eip);
992
        }
993

    
994
        sp -= push_size;
995
        load_seg(R_CS, selector, env->eip);
996
        /* from this point, not restartable if same priviledge */
997
        if (!(env->segs[R_SS].flags & DESC_B_MASK))
998
            ESP = (ESP & 0xffff0000) | (sp & 0xffff);
999
        else
1000
            ESP = sp;
1001
        EIP = offset;
1002
    }
1003
}
1004

    
1005
/* init the segment cache in vm86 mode */
1006
static inline void load_seg_vm(int seg, int selector)
1007
{
1008
    SegmentCache *sc = &env->segs[seg];
1009
    selector &= 0xffff;
1010
    sc->base = (uint8_t *)(selector << 4);
1011
    sc->selector = selector;
1012
    sc->flags = 0;
1013
    sc->limit = 0xffff;
1014
}
1015

    
1016
/* real mode iret: pop EIP, CS and EFLAGS from the stack.
   'shift' selects the operand size (0 = 16 bit, 1 = 32 bit). */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp;
    uint8_t *ssp;
    int eflags_mask;
    
    /* real-mode stack addressing uses only the low 16 bits of ESP */
    sp = ESP & 0xffff;
    ssp = env->segs[R_SS].base + sp;
    if (shift == 1) {
        /* 32 bits */
        new_eflags = ldl(ssp + 8);
        new_cs = ldl(ssp + 4) & 0xffff;
        /* EIP is still truncated to 16 bits in real mode */
        new_eip = ldl(ssp) & 0xffff;
    } else {
        /* 16 bits */
        new_eflags = lduw(ssp + 4);
        new_cs = lduw(ssp + 2);
        new_eip = lduw(ssp);
    }
    /* pop three stack slots, updating only the low half of ESP */
    new_esp = sp + (6 << shift);
    ESP = (ESP & 0xffff0000) | 
        (new_esp & 0xffff);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    /* real mode runs at CPL 0, so all CPL0-writable flags may change;
       a 16-bit iret only touches the low 16 flag bits */
    eflags_mask = FL_UPDATE_CPL0_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}
1046

    
1047
/* protected mode return: common implementation of iret (is_iret=1) and
   lret (is_iret=0, with 'addend' extra bytes popped for lret imm16).
   'shift' selects the operand size (0 = 16 bit, 1 = 32 bit).
   Handles same-privilege returns, outer-privilege returns (with stack
   switch) and iret back to vm86 mode. */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2;
    int cpl, dpl, rpl, eflags_mask;
    uint8_t *ssp;
    
    sp = ESP;
    /* a 16-bit stack segment only uses the low half of ESP */
    if (!(env->segs[R_SS].flags & DESC_B_MASK))
        sp &= 0xffff;
    ssp = env->segs[R_SS].base + sp;
    if (shift == 1) {
        /* 32 bits */
        if (is_iret)
            new_eflags = ldl(ssp + 8);
        new_cs = ldl(ssp + 4) & 0xffff;
        new_eip = ldl(ssp);
        /* 32-bit iret with VM set in the popped flags resumes vm86 */
        if (is_iret && (new_eflags & VM_MASK))
            goto return_to_vm86;
    } else {
        /* 16 bits */
        if (is_iret)
            new_eflags = lduw(ssp + 4);
        new_cs = lduw(ssp + 2);
        new_eip = lduw(ssp);
    }
    /* validate the new CS: non-null, loadable, a code segment */
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->cpl;
    rpl = new_cs & 3; 
    /* returns may only go to the same or a less privileged level */
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    /* NOTE(review): this condition is always true because DESC_CS_MASK
       was already required above, so the else branch is unreachable.
       It looks like the conforming-code bit was intended here
       (conforming: dpl <= rpl; non-conforming: dpl == rpl) -- confirm
       against the descriptor flag definitions. */
    if (e2 & DESC_CS_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
    
    if (rpl == cpl) {
        /* return to same privilege level */
        load_seg(R_CS, new_cs, env->eip);
        /* pop CS:EIP, the flags for iret, plus any lret immediate */
        new_esp = sp + (4 << shift) + ((2 * is_iret) << shift) + addend;
    } else {
        /* return to outer (less privileged) level: ESP and SS follow
           on the stack after the frame popped above */
        ssp += (4 << shift) + ((2 * is_iret) << shift) + addend;
        if (shift == 1) {
            /* 32 bits */
            new_esp = ldl(ssp);
            new_ss = ldl(ssp + 4) & 0xffff;
        } else {
            /* 16 bits */
            new_esp = lduw(ssp);
            new_ss = lduw(ssp + 2);
        }
        
        /* validate the new SS: RPL must match, writable data segment,
           DPL equal to the new privilege level, present */
        if ((new_ss & 3) != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (load_segment(&e1, &e2, new_ss) != 0)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(e2 & DESC_S_MASK) ||
            (e2 & DESC_CS_MASK) ||
            !(e2 & DESC_W_MASK))
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);

        load_seg(R_CS, new_cs, env->eip);
        load_seg(R_SS, new_ss, env->eip);
    }
    if (env->segs[R_SS].flags & DESC_B_MASK)
        ESP = new_esp;
    else
        ESP = (ESP & 0xffff0000) | 
            (new_esp & 0xffff);
    env->eip = new_eip;
    if (is_iret) {
        /* CPL 0 may update all flags; other levels use the restricted
           mask.  16-bit iret only affects the low 16 flag bits. */
        if (cpl == 0)
            eflags_mask = FL_UPDATE_CPL0_MASK;
        else
            eflags_mask = FL_UPDATE_MASK32;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    /* the vm86 iret frame also carries ESP, SS and the four data
       segment registers */
    new_esp = ldl(ssp + 12);
    new_ss = ldl(ssp + 16);
    new_es = ldl(ssp + 20);
    new_ds = ldl(ssp + 24);
    new_fs = ldl(ssp + 28);
    new_gs = ldl(ssp + 32);
    
    /* modify processor state */
    load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs);
    cpu_x86_set_cpl(env, 3);  /* vm86 always runs at CPL 3 */
    load_seg_vm(R_SS, new_ss);
    load_seg_vm(R_ES, new_es);
    load_seg_vm(R_DS, new_ds);
    load_seg_vm(R_FS, new_fs);
    load_seg_vm(R_GS, new_gs);

    env->eip = new_eip;
    ESP = new_esp;
}
1169

    
1170
/* protected mode iret: pop EIP/CS/EFLAGS, no extra stack adjustment */
void helper_iret_protected(int shift)
{
    helper_ret_protected(shift, 1, 0);
}
1174

    
1175
/* protected mode lret: pop EIP/CS plus 'addend' extra bytes
   (lret imm16), without touching EFLAGS */
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
1179

    
1180
void helper_movl_crN_T0(int reg)
1181
{
1182
    env->cr[reg] = T0;
1183
    switch(reg) {
1184
    case 0:
1185
        cpu_x86_update_cr0(env);
1186
        break;
1187
    case 3:
1188
        cpu_x86_update_cr3(env);
1189
        break;
1190
    }
1191
}
1192

    
1193
/* XXX: do more */
/* mov drN, T0: store into a debug register.  Only the value is kept;
   no debug-register side effects are emulated yet (see XXX above). */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}
1198

    
1199
/* invlpg: invalidate the TLB entry for the page containing 'addr' */
void helper_invlpg(unsigned int addr)
{
    cpu_x86_flush_tlb(env, addr);
}
1203

    
1204
/* rdtsc */
#ifndef __i386__
/* pseudo time-stamp counter for non-x86 hosts: simply incremented on
   each read (see helper_rdtsc) */
uint64_t emu_time;
#endif
1208

    
1209
/* rdtsc: read the time-stamp counter into EDX:EAX.  On an x86 host the
   real host counter is used; elsewhere a monotonically increasing
   software counter stands in. */
void helper_rdtsc(void)
{
    uint64_t val;
#ifdef __i386__
    /* "=A" places the 64-bit result directly in EDX:EAX */
    asm("rdtsc" : "=A" (val));
#else
    /* better than nothing: the time increases */
    val = emu_time++;
#endif
    EAX = val;
    EDX = val >> 32;
}
1221

    
1222
void helper_wrmsr(void)
1223
{
1224
    switch(ECX) {
1225
    case MSR_IA32_SYSENTER_CS:
1226
        env->sysenter_cs = EAX & 0xffff;
1227
        break;
1228
    case MSR_IA32_SYSENTER_ESP:
1229
        env->sysenter_esp = EAX;
1230
        break;
1231
    case MSR_IA32_SYSENTER_EIP:
1232
        env->sysenter_eip = EAX;
1233
        break;
1234
    default:
1235
        /* XXX: exception ? */
1236
        break; 
1237
    }
1238
}
1239

    
1240
void helper_rdmsr(void)
1241
{
1242
    switch(ECX) {
1243
    case MSR_IA32_SYSENTER_CS:
1244
        EAX = env->sysenter_cs;
1245
        EDX = 0;
1246
        break;
1247
    case MSR_IA32_SYSENTER_ESP:
1248
        EAX = env->sysenter_esp;
1249
        EDX = 0;
1250
        break;
1251
    case MSR_IA32_SYSENTER_EIP:
1252
        EAX = env->sysenter_eip;
1253
        EDX = 0;
1254
        break;
1255
    default:
1256
        /* XXX: exception ? */
1257
        break; 
1258
    }
1259
}
1260

    
1261
void helper_lsl(void)
1262
{
1263
    unsigned int selector, limit;
1264
    uint32_t e1, e2;
1265

    
1266
    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1267
    selector = T0 & 0xffff;
1268
    if (load_segment(&e1, &e2, selector) != 0)
1269
        return;
1270
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1271
    if (e2 & (1 << 23))
1272
        limit = (limit << 12) | 0xfff;
1273
    T1 = limit;
1274
    CC_SRC |= CC_Z;
1275
}
1276

    
1277
void helper_lar(void)
1278
{
1279
    unsigned int selector;
1280
    uint32_t e1, e2;
1281

    
1282
    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1283
    selector = T0 & 0xffff;
1284
    if (load_segment(&e1, &e2, selector) != 0)
1285
        return;
1286
    T1 = e2 & 0x00f0ff00;
1287
    CC_SRC |= CC_Z;
1288
}
1289

    
1290
/* FPU helpers */
1291

    
1292
#ifndef USE_X86LDOUBLE
/* Hosts without native 80-bit long doubles convert extended-precision
   memory operands through helper_fldt/helper_fstt. */

/* fld tbyte: push the 80-bit value at A0 onto the FPU stack */
void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;  /* stack grows downward mod 8 */
    env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* fst tbyte: store ST0 as an 80-bit value at A0 */
void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, (uint8_t *)A0);
}
#endif
1307

    
1308
/* BCD ops */

/* multiply by ten with shifts and adds: 10*iv = 2*iv + 8*iv */
#define MUL10(iv) ( iv + iv + (iv << 3) )
1311

    
1312
void helper_fbld_ST0_A0(void)
1313
{
1314
    CPU86_LDouble tmp;
1315
    uint64_t val;
1316
    unsigned int v;
1317
    int i;
1318

    
1319
    val = 0;
1320
    for(i = 8; i >= 0; i--) {
1321
        v = ldub((uint8_t *)A0 + i);
1322
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
1323
    }
1324
    tmp = val;
1325
    if (ldub((uint8_t *)A0 + 9) & 0x80)
1326
        tmp = -tmp;
1327
    fpush();
1328
    ST0 = tmp;
1329
}
1330

    
1331
/* fbst: store ST0 at A0 as an 18-digit packed-BCD integer (9 data
   bytes, least significant first, plus a sign byte). */
void helper_fbst_ST0_A0(void)
{
    CPU86_LDouble tmp;
    int v;
    uint8_t *mem_ref, *mem_end;
    int64_t val;

    /* round to an integer first (host rounding via rint) */
    tmp = rint(ST0);
    val = (int64_t)tmp;
    mem_ref = (uint8_t *)A0;
    mem_end = mem_ref + 9;
    /* byte 9: sign in the top bit */
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    /* two decimal digits per byte, low digit in the low nibble */
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    /* zero-fill the remaining digit bytes */
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
1360

    
1361
/* f2xm1: ST0 = 2^ST0 - 1 */
void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}
1365

    
1366
void helper_fyl2x(void)
1367
{
1368
    CPU86_LDouble fptemp;
1369
    
1370
    fptemp = ST0;
1371
    if (fptemp>0.0){
1372
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
1373
        ST1 *= fptemp;
1374
        fpop();
1375
    } else { 
1376
        env->fpus &= (~0x4700);
1377
        env->fpus |= 0x400;
1378
    }
1379
}
1380

    
1381
void helper_fptan(void)
1382
{
1383
    CPU86_LDouble fptemp;
1384

    
1385
    fptemp = ST0;
1386
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1387
        env->fpus |= 0x400;
1388
    } else {
1389
        ST0 = tan(fptemp);
1390
        fpush();
1391
        ST0 = 1.0;
1392
        env->fpus &= (~0x400);  /* C2 <-- 0 */
1393
        /* the above code is for  |arg| < 2**52 only */
1394
    }
1395
}
1396

    
1397
void helper_fpatan(void)
1398
{
1399
    CPU86_LDouble fptemp, fpsrcop;
1400

    
1401
    fpsrcop = ST1;
1402
    fptemp = ST0;
1403
    ST1 = atan2(fpsrcop,fptemp);
1404
    fpop();
1405
}
1406

    
1407
/* fxtract: replace ST0 with its unbiased exponent and push the
   significand (exponent forced back to the bias) on top. */
void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);  /* rebias: leaves only the significand */
    ST0 = temp.d;
}
1420

    
1421
/* fprem1: partial remainder ST0 = ST0 - q*ST1 (the IEEE-remainder
   variant of fprem).  When the exponents are close enough the exact
   remainder is computed and the low quotient bits go to C0/C1/C3;
   otherwise only a partial reduction is done and C2 is set so the
   caller can iterate. */
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* truncate the quotient toward zero */
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        /* reduce the magnitude partially so the next fprem1 converges */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
            floor(fpsrcop): ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
1453

    
1454
/* fprem: partial remainder ST0 = ST0 - q*ST1 with a quotient chopped
   toward zero.  When the exponents are close enough the exact
   remainder is computed and the low quotient bits go to C0/C1/C3;
   otherwise only a partial reduction is done and C2 is set so the
   caller can iterate. */
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;
    
    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if ( expdif < 53 ) {
        dblq = fpsrcop / fptemp;
        /* truncate the quotient toward zero */
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        /* reduce the magnitude partially so the next fprem converges */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0)?
            -(floor(fabs(fpsrcop))): floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
1486

    
1487
void helper_fyl2xp1(void)
1488
{
1489
    CPU86_LDouble fptemp;
1490

    
1491
    fptemp = ST0;
1492
    if ((fptemp+1.0)>0.0) {
1493
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
1494
        ST1 *= fptemp;
1495
        fpop();
1496
    } else { 
1497
        env->fpus &= (~0x4700);
1498
        env->fpus |= 0x400;
1499
    }
1500
}
1501

    
1502
void helper_fsqrt(void)
1503
{
1504
    CPU86_LDouble fptemp;
1505

    
1506
    fptemp = ST0;
1507
    if (fptemp<0.0) { 
1508
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
1509
        env->fpus |= 0x400;
1510
    }
1511
    ST0 = sqrt(fptemp);
1512
}
1513

    
1514
void helper_fsincos(void)
1515
{
1516
    CPU86_LDouble fptemp;
1517

    
1518
    fptemp = ST0;
1519
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1520
        env->fpus |= 0x400;
1521
    } else {
1522
        ST0 = sin(fptemp);
1523
        fpush();
1524
        ST0 = cos(fptemp);
1525
        env->fpus &= (~0x400);  /* C2 <-- 0 */
1526
        /* the above code is for  |arg| < 2**63 only */
1527
    }
1528
}
1529

    
1530
/* frndint: round ST0 to an integer according to the rounding-control
   field of the FPU control word. */
void helper_frndint(void)
{
    CPU86_LDouble a;

    a = ST0;
#ifdef __arm__
    /* the ARM FPA has one rounding instruction per direction; select
       the one matching the x86 RC field */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_DOWN:
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_UP:
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_CHOP:
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));
        break;
    }
#else
    /* NOTE(review): rint() uses the host rounding mode, which is
       assumed to track the guest FPU control word -- confirm */
    a = rint(a);
#endif
    ST0 = a;
}
1556

    
1557
void helper_fscale(void)
1558
{
1559
    CPU86_LDouble fpsrcop, fptemp;
1560

    
1561
    fpsrcop = 2.0;
1562
    fptemp = pow(fpsrcop,ST1);
1563
    ST0 *= fptemp;
1564
}
1565

    
1566
void helper_fsin(void)
1567
{
1568
    CPU86_LDouble fptemp;
1569

    
1570
    fptemp = ST0;
1571
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1572
        env->fpus |= 0x400;
1573
    } else {
1574
        ST0 = sin(fptemp);
1575
        env->fpus &= (~0x400);  /* C2 <-- 0 */
1576
        /* the above code is for  |arg| < 2**53 only */
1577
    }
1578
}
1579

    
1580
void helper_fcos(void)
1581
{
1582
    CPU86_LDouble fptemp;
1583

    
1584
    fptemp = ST0;
1585
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1586
        env->fpus |= 0x400;
1587
    } else {
1588
        ST0 = cos(fptemp);
1589
        env->fpus &= (~0x400);  /* C2 <-- 0 */
1590
        /* the above code is for  |arg5 < 2**63 only */
1591
    }
1592
}
1593

    
1594
/* fxam: classify ST0 into the C3..C0 condition bits of the FPU
   status word. */
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 (sign of ST0) */

    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        /* maximum exponent: infinity or NaN, decided by the mantissa */
        if (MANTD(temp) == 0)
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        /* minimum exponent: zero or denormal, decided by the mantissa */
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;  /* normal finite number */
    }
}
1620

    
1621
/* fstenv: store the FPU environment (control word, status word with
   the current stack top merged in, recomputed tag word, and zeroed
   instruction/operand pointers) at ptr, in 32-bit or 16-bit layout. */
void helper_fstenv(uint8_t *ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    /* merge the stack-top field (bits 13..11) into the status word */
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    /* rebuild the tag word: 2 bits per register, reg 7 first */
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;  /* empty */
        } else {
            tmp.d = env->fpregs[i];
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0);  /* fpip: instruction pointer, not tracked */
        stl(ptr + 16, 0);  /* fpcs */
        stl(ptr + 20, 0);  /* fpoo: operand pointer, not tracked */
        stl(ptr + 24, 0);  /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}
1670

    
1671
/* fldenv: reload the FPU environment (control word, status word and
   tag word) stored at ptr by fstenv, in 32-bit or 16-bit layout.
   Only the "empty" state of each tag is kept, as fptags[] stores a
   single empty/valid bit per register. */
void helper_fldenv(uint8_t *ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    /* split the stack-top field back out of the status word */
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    /* bug fix: restore all 8 tag entries (the loop previously stopped
       at 7, leaving fptags[7] stale) */
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}
1692

    
1693
/* fsave: store the FPU environment followed by the 8 data registers
   in 80-bit format, then reinitialise the FPU exactly as fninit does. */
void helper_fsave(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    /* the environment is 28 bytes in 32-bit mode, 14 in 16-bit mode */
    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
#ifdef USE_X86LDOUBLE
        /* native 80-bit store */
        *(long double *)ptr = tmp;
#else
        helper_fstt(tmp, ptr);
#endif        
        ptr += 10;  /* 80-bit extended format */
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;  /* default control word: all exceptions masked */
    env->fptags[0] = 1;  /* mark every register empty */
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}
1724

    
1725
/* frstor: reload the FPU environment and the 8 data registers stored
   by fsave at ptr. */
void helper_frstor(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    /* skip past the environment: 28 bytes (32 bit) or 14 (16 bit) */
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
#ifdef USE_X86LDOUBLE
        /* native 80-bit load */
        tmp = *(long double *)ptr;
#else
        tmp = helper_fldt(ptr);
#endif        
        ST(i) = tmp;
        ptr += 10;  /* 80-bit extended format */
    }
}
1743

    
1744
#define SHIFT 0
1745
#include "softmmu_template.h"
1746

    
1747
#define SHIFT 1
1748
#include "softmmu_template.h"
1749

    
1750
#define SHIFT 2
1751
#include "softmmu_template.h"
1752

    
1753
#define SHIFT 3
1754
#include "softmmu_template.h"
1755

    
1756
/* try to fill the TLB and return an exception if error */
1757
void tlb_fill(unsigned long addr, int is_write, void *retaddr)
1758
{
1759
    TranslationBlock *tb;
1760
    int ret;
1761
    unsigned long pc;
1762
    ret = cpu_x86_handle_mmu_fault(env, addr, is_write);
1763
    if (ret) {
1764
        /* now we have a real cpu fault */
1765
        pc = (unsigned long)retaddr;
1766
        tb = tb_find_pc(pc);
1767
        if (tb) {
1768
            /* the PC is inside the translated code. It means that we have
1769
               a virtual CPU fault */
1770
            cpu_restore_state(tb, env, pc);
1771
        }
1772
        raise_exception_err(EXCP0E_PAGE, env->error_code);
1773
    }
1774
}