Statistics
| Branch: | Revision:

root / helper-i386.c @ 2c1794c4

History | View | Annotate | Download (45.9 kB)

1
/*
2
 *  i386 helpers
3
 * 
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "exec-i386.h"
21

    
22
/* PF lookup table: parity_table[b] is CC_P when byte b has an even
   number of set bits (x86 parity flag convention), 0 otherwise. */
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
56

    
57
/* modulo 17 table: rotate count for 16-bit RCL/RCR wraps at 17
   (16 data bits + CF) */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7, 
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
64

    
65
/* modulo 9 table: rotate count for 8-bit RCL/RCR wraps at 9
   (8 data bits + CF) */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7, 
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5, 
    6, 7, 8, 0, 1, 2, 3, 4,
};
72

    
73
/* FPU constant-load table (fldz, fld1, fldpi, fldlg2, fldln2, fldl2e,
   fldl2t): index 0 = +0.0, 1 = +1.0, then the named constants. */
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
83
    
84
/* thread support */

/* Single global lock serializing CPU emulation; taken/released via
   cpu_lock()/cpu_unlock() below. */
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
87

    
88
/* Acquire the global CPU emulation lock. */
void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}
92

    
93
/* Release the global CPU emulation lock. */
void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
97

    
98
/* Abort execution of translated code and longjmp back to the main CPU
   loop.  Guest registers that are mapped to host registers (each
   reg_XXX macro) must be spilled to env by hand first, because longjmp
   will restore the host registers and lose their current values. */
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
#ifdef reg_EAX
    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_ECX
    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_EBX
    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ESP
    env->regs[R_ESP] = ESP;
#endif
#ifdef reg_EBP
    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESI
    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
    env->regs[R_EDI] = EDI;
#endif
    longjmp(env->jmp_env, 1);
}
128

    
129
/* Read the ring-`dpl` stack pointer (SS:ESP pair) out of the current
   TSS.  Handles both 16-bit (286) and 32-bit (386) TSS layouts.
   Raises #TS if the entry lies beyond the TSS limit; aborts on a
   not-present or wrong-typed task register. */
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr, 
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;
    
#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    /* shift is 0 for a 16-bit TSS, 1 for a 32-bit TSS */
    shift = type >> 3;
    /* byte offset of the ring-dpl SS:ESP entry inside the TSS */
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw(env->tr.base + index);
        *ss_ptr = lduw(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl(env->tr.base + index);
        *ss_ptr = lduw(env->tr.base + index + 4);
    }
}
163

    
164
/* return non zero if error */
165
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
166
                               int selector)
167
{
168
    SegmentCache *dt;
169
    int index;
170
    uint8_t *ptr;
171

    
172
    if (selector & 0x4)
173
        dt = &env->ldt;
174
    else
175
        dt = &env->gdt;
176
    index = selector & ~7;
177
    if ((index + 7) > dt->limit)
178
        return -1;
179
    ptr = dt->base + index;
180
    *e1_ptr = ldl(ptr);
181
    *e2_ptr = ldl(ptr + 4);
182
    return 0;
183
}
184
                                     
185

    
186
/* protected mode interrupt */
/* Deliver interrupt/exception `intno` in protected mode: parse the IDT
   gate, check gate/code-segment privileges, optionally switch to the
   inner-ring stack fetched from the TSS, push the return frame (plus
   vm86 segment registers when coming from vm86 mode), and jump to the
   handler.  is_int is non-zero for a software INT; next_eip is then
   the return address.  Task gates are not supported. */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int type, dpl, cpl, selector, ss_dpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
    uint32_t old_cs, old_ss, old_esp, old_eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl(ptr);
    e2 = ldl(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        cpu_abort(env, "task gate not supported");
        break;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (env->eflags & VM_MASK)
        cpl = 3; /* vm86 always runs at CPL 3 */
    else
        cpl = env->segs[R_CS].selector & 3;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    /* target handler CS:offset from the gate */
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege: fetch and validate the new SS:ESP from
           the TSS */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        /* SS must be a writable data segment */
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        new_stack = 0;
    } else {
        /* non-conforming segment with dpl > cpl: invalid */
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
    }

    /* shift = 0 for a 286 gate (16-bit pushes), 1 for a 386 gate */
    shift = type >> 3;
    has_error_code = 0;
    if (!is_int) {
        /* hardware exceptions that push an error code */
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    /* frame size: EIP+CS+EFLAGS, plus SS:ESP on stack switch, plus the
       error code, plus the four vm86 segment registers */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;

    /* XXX: check that enough room is available */
    if (new_stack) {
        old_esp = env->regs[R_ESP];
        old_ss = env->segs[R_SS].selector;
        load_seg(R_SS, ss, env->eip);
    } else {
        old_esp = 0;
        old_ss = 0;
        esp = env->regs[R_ESP];
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    load_seg(R_CS, selector, env->eip);
    env->eip = offset;
    env->regs[R_ESP] = esp - push_size;
    /* push the frame downwards from the (possibly new) stack top */
    ssp = env->segs[R_SS].base + esp;
    if (shift == 1) {
        int old_eflags;
        if (env->eflags & VM_MASK) {
            /* leaving vm86: save the data segment registers */
            ssp -= 4;
            stl(ssp, env->segs[R_GS].selector);
            ssp -= 4;
            stl(ssp, env->segs[R_FS].selector);
            ssp -= 4;
            stl(ssp, env->segs[R_DS].selector);
            ssp -= 4;
            stl(ssp, env->segs[R_ES].selector);
        }
        if (new_stack) {
            ssp -= 4;
            stl(ssp, old_ss);
            ssp -= 4;
            stl(ssp, old_esp);
        }
        ssp -= 4;
        old_eflags = compute_eflags();
        stl(ssp, old_eflags);
        ssp -= 4;
        stl(ssp, old_cs);
        ssp -= 4;
        stl(ssp, old_eip);
        if (has_error_code) {
            ssp -= 4;
            stl(ssp, error_code);
        }
    } else {
        /* 16-bit frame */
        if (new_stack) {
            ssp -= 2;
            stw(ssp, old_ss);
            ssp -= 2;
            stw(ssp, old_esp);
        }
        ssp -= 2;
        stw(ssp, compute_eflags());
        ssp -= 2;
        stw(ssp, old_cs);
        ssp -= 2;
        stw(ssp, old_eip);
        if (has_error_code) {
            ssp -= 2;
            stw(ssp, error_code);
        }
    }
    
    /* interrupt gate clear IF mask (trap gates have type bit 0 set and
       leave IF untouched) */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
363

    
364
/* real mode interrupt */
/* Deliver interrupt `intno` in real mode: read CS:IP from the 4-byte
   IVT entry, push FLAGS/CS/IP (16-bit, SP wraps within the segment)
   and jump to the vector.  error_code is unused in real mode. */
static void do_interrupt_real(int intno, int is_int, int error_code,
                                 unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw(ptr);
    selector = lduw(ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* push FLAGS, CS, IP — SP arithmetic is masked to 16 bits */
    esp -= 2;
    stw(ssp + (esp & 0xffff), compute_eflags());
    esp -= 2;
    stw(ssp + (esp & 0xffff), old_cs);
    esp -= 2;
    stw(ssp + (esp & 0xffff), old_eip);
    
    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (uint8_t *)(selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
402

    
403
/* fake user mode interrupt */
404
void do_interrupt_user(int intno, int is_int, int error_code, 
405
                       unsigned int next_eip)
406
{
407
    SegmentCache *dt;
408
    uint8_t *ptr;
409
    int dpl, cpl;
410
    uint32_t e2;
411

    
412
    dt = &env->idt;
413
    ptr = dt->base + (intno * 8);
414
    e2 = ldl(ptr + 4);
415
    
416
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
417
    cpl = 3;
418
    /* check privledge if software int */
419
    if (is_int && dpl < cpl)
420
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
421

    
422
    /* Since we emulate only user space, we cannot do more than
423
       exiting the emulation with the suitable exception and error
424
       code */
425
    if (is_int)
426
        EIP = next_eip;
427
}
428

    
429
/*
430
 * Begin excution of an interruption. is_int is TRUE if coming from
431
 * the int instruction. next_eip is the EIP value AFTER the interrupt
432
 * instruction. It is only relevant if is_int is TRUE.  
433
 */
434
void do_interrupt(int intno, int is_int, int error_code, 
435
                  unsigned int next_eip)
436
{
437
    if (env->cr[0] & CR0_PE_MASK) {
438
        do_interrupt_protected(intno, is_int, error_code, next_eip);
439
    } else {
440
        do_interrupt_real(intno, is_int, error_code, next_eip);
441
    }
442
}
443

    
444
/*
445
 * Signal an interruption. It is executed in the main CPU loop.
446
 * is_int is TRUE if coming from the int instruction. next_eip is the
447
 * EIP value AFTER the interrupt instruction. It is only relevant if
448
 * is_int is TRUE.  
449
 */
450
void raise_interrupt(int intno, int is_int, int error_code, 
451
                     unsigned int next_eip)
452
{
453
    env->exception_index = intno;
454
    env->error_code = error_code;
455
    env->exception_is_int = is_int;
456
    env->exception_next_eip = next_eip;
457
    cpu_loop_exit();
458
}
459

    
460
/* shortcuts to generate exceptions */

/* Raise a CPU exception that carries an error code (e.g. #GP, #TS). */
void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
465

    
466
/* Raise a CPU exception with no error code. */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
470

    
471
#ifdef BUGGY_GCC_DIV64
472
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
473
   call it from another function */
474
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
475
{
476
    *q_ptr = num / den;
477
    return num % den;
478
}
479

    
480
int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
481
{
482
    *q_ptr = num / den;
483
    return num % den;
484
}
485
#endif
486

    
487
void helper_divl_EAX_T0(uint32_t eip)
488
{
489
    unsigned int den, q, r;
490
    uint64_t num;
491
    
492
    num = EAX | ((uint64_t)EDX << 32);
493
    den = T0;
494
    if (den == 0) {
495
        EIP = eip;
496
        raise_exception(EXCP00_DIVZ);
497
    }
498
#ifdef BUGGY_GCC_DIV64
499
    r = div64(&q, num, den);
500
#else
501
    q = (num / den);
502
    r = (num % den);
503
#endif
504
    EAX = q;
505
    EDX = r;
506
}
507

    
508
void helper_idivl_EAX_T0(uint32_t eip)
509
{
510
    int den, q, r;
511
    int64_t num;
512
    
513
    num = EAX | ((uint64_t)EDX << 32);
514
    den = T0;
515
    if (den == 0) {
516
        EIP = eip;
517
        raise_exception(EXCP00_DIVZ);
518
    }
519
#ifdef BUGGY_GCC_DIV64
520
    r = idiv64(&q, num, den);
521
#else
522
    q = (num / den);
523
    r = (num % den);
524
#endif
525
    EAX = q;
526
    EDX = r;
527
}
528

    
529
/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand at A0.
   If equal, store ECX:EBX there and set ZF; otherwise load the operand
   into EDX:EAX and clear ZF.  All other flags are preserved. */
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    /* materialize the lazily-computed flags so only ZF changes */
    eflags = cc_table[CC_OP].compute_all();
    d = ldq((uint8_t *)A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
546

    
547
/* We simulate a pre-MMX pentium as in valgrind */
/* CPUID leaf 1 EDX feature-flag bit positions (Intel layout). */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
/* ... */
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)
568

    
569
/* CPUID instruction: leaf 0 returns the "GenuineIntel" vendor string,
   leaf 1 returns family/model/stepping and the feature bits of an
   emulated Pentium Pro.  Other leaves leave the registers unchanged. */
void helper_cpuid(void)
{
    if (EAX == 0) {
        EAX = 1; /* max EAX index supported */
        EBX = 0x756e6547; /* "Genu" */
        ECX = 0x6c65746e; /* "ntel" — string order is EBX, EDX, ECX */
        EDX = 0x49656e69; /* "ineI" */
    } else if (EAX == 1) {
        int family, model, stepping;
        /* EAX = 1 info */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 1;
        stepping = 3;
#endif
        EAX = (family << 8) | (model << 4) | stepping;
        EBX = 0;
        ECX = 0;
        EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
            CPUID_TSC | CPUID_MSR | CPUID_MCE |
            CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
    }
}
598

    
599
/* Decode the descriptor words e1/e2 into the segment cache: base is
   assembled from its three scattered fields, the 20-bit limit is
   scaled to byte granularity when the G bit is set, and e2 is kept
   verbatim as the flags word. */
static inline void load_seg_cache(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    uint32_t limit;

    /* base = e1[31:16] | e2[7:0]<<16 | e2[31:24]<<24 */
    sc->base = (void *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        /* page granularity: limit in 4K units, low 12 bits all ones */
        limit = (limit << 12) | 0xfff;
    }
    sc->limit = limit;
    sc->flags = e2;
}
607

    
608
/* LLDT: load the LDT register from the selector in T0.  A null
   selector marks the LDT invalid; otherwise the selector must point
   into the GDT at a present LDT descriptor (system type 2). */
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index;
    uint8_t *ptr;
    
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = NULL;
        env->ldt.limit = 0;
    } else {
        /* the LDT descriptor must come from the GDT, not the LDT */
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl(ptr);
        e2 = ldl(ptr + 4);
        /* must be a system descriptor of type 2 (LDT) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache(&env->ldt, e1, e2);
    }
    env->ldt.selector = selector;
}
639

    
640
/* LTR: load the task register from the selector in T0.  A null
   selector invalidates TR; otherwise the selector must point into the
   GDT at a present, available TSS descriptor (type 2 = 286 TSS,
   type 9 = 386 TSS), which is then marked busy. */
void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type;
    uint8_t *ptr;
    
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = NULL;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        /* the TSS descriptor must come from the GDT */
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl(ptr);
        e2 = ldl(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        /* must be a system descriptor: available 286 or 386 TSS */
        if ((e2 & DESC_S_MASK) || 
            (type != 2 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache(&env->tr, e1, e2);
        /* write the busy bit back into the in-memory descriptor */
        e2 |= 0x00000200; /* set the busy bit */
        stl(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
676

    
677
/* only works if protected mode and not VM86 */
/* Load segment register `seg_reg` with `selector` after the protected
   mode checks: descriptor fetch, type/permission validation (SS must
   be writable data, data/stack registers may not take execute-only
   code), and present check.  On any fault, EIP is first rewound to
   `cur_eip` so the faulting instruction can be restarted. */
void load_seg(int seg_reg, int selector, unsigned int cur_eip)
{
    SegmentCache *sc;
    uint32_t e1, e2;
    
    sc = &env->segs[seg_reg];
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS) {
            /* a null SS is never allowed */
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, 0);
        } else {
            /* XXX: each access should trigger an exception */
            sc->base = NULL;
            sc->limit = 0;
            sc->flags = 0;
        }
    } else {
        if (load_segment(&e1, &e2, selector) != 0) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }
        /* must be a code/data descriptor, and not execute-only code */
        if (!(e2 & DESC_S_MASK) ||
            (e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }

        if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & (DESC_CS_MASK | DESC_W_MASK)) == 0) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        } else {
            /* data registers may not hold execute-only code segments */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            EIP = cur_eip;
            /* not-present SS raises #SS, other registers raise #NP */
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }
        load_seg_cache(sc, e1, e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", 
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
    sc->selector = selector;
}
733

    
734
/* protected mode jump */
735
void helper_ljmp_protected_T0_T1(void)
736
{
737
    int new_cs, new_eip;
738
    SegmentCache sc1;
739
    uint32_t e1, e2, cpl, dpl, rpl;
740

    
741
    new_cs = T0;
742
    new_eip = T1;
743
    if ((new_cs & 0xfffc) == 0)
744
        raise_exception_err(EXCP0D_GPF, 0);
745
    if (load_segment(&e1, &e2, new_cs) != 0)
746
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
747
    cpl = env->segs[R_CS].selector & 3;
748
    if (e2 & DESC_S_MASK) {
749
        if (!(e2 & DESC_CS_MASK))
750
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
751
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
752
        if (e2 & DESC_CS_MASK) {
753
            /* conforming code segment */
754
            if (dpl > cpl)
755
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
756
        } else {
757
            /* non conforming code segment */
758
            rpl = new_cs & 3;
759
            if (rpl > cpl)
760
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
761
            if (dpl != cpl)
762
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
763
        }
764
        if (!(e2 & DESC_P_MASK))
765
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
766
        load_seg_cache(&sc1, e1, e2);
767
        if (new_eip > sc1.limit)
768
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
769
        env->segs[R_CS].base = sc1.base;
770
        env->segs[R_CS].limit = sc1.limit;
771
        env->segs[R_CS].flags = sc1.flags;
772
        env->segs[R_CS].selector = (new_cs & 0xfffc) | cpl;
773
        EIP = new_eip;
774
    } else {
775
        cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x", 
776
                  new_cs, new_eip);
777
    }
778
}
779

    
780
/* real mode call */
/* Far CALL in real mode to T0:T1: push the return CS:IP (32- or
   16-bit depending on `shift`) and load CS real-mode style.
   SP updates respect the SS B bit for the 16-bit wrap. */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    uint8_t *ssp;
    
    new_cs = T0;
    new_eip = T1;
    esp = env->regs[R_ESP];
    esp_mask = 0xffffffff;
    if (!(env->segs[R_SS].flags & DESC_B_MASK))
        esp_mask = 0xffff;
    ssp = env->segs[R_SS].base;
    if (shift) {
        /* 32-bit operand size: push CS then EIP as dwords */
        esp -= 4;
        stl(ssp + (esp & esp_mask), env->segs[R_CS].selector);
        esp -= 4;
        stl(ssp + (esp & esp_mask), next_eip);
    } else {
        /* 16-bit operand size */
        esp -= 2;
        stw(ssp + (esp & esp_mask), env->segs[R_CS].selector);
        esp -= 2;
        stw(ssp + (esp & esp_mask), next_eip);
    }

    /* write back SP (only the low 16 bits for a 16-bit stack) */
    if (!(env->segs[R_SS].flags & DESC_B_MASK))
        env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    else
        env->regs[R_ESP] = esp;
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
}
814

    
815
/* protected mode call */
/* Far CALL in protected mode to T0:T1.  Handles direct calls to code
   segments and calls through 286/386 call gates, including the
   inner-privilege stack switch with parameter copy from the old
   stack.  TSS and task gates abort.  `shift` selects 16/32-bit
   operand size for the direct-call push. */
void helper_lcall_protected_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    SegmentCache sc1;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, push_size, sp, type, ss_dpl;
    uint32_t old_ss, old_esp, val, i;
    uint8_t *ssp, *old_ssp;
    
    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->segs[R_CS].selector & 3;
    if (e2 & DESC_S_MASK) {
        /* direct call to a code segment */
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        /* NOTE(review): this conforming test looks wrong — DESC_CS_MASK
           is always set here (checked just above), so the non-conforming
           branch below is unreachable; the conforming bit is
           DESC_C_MASK as used in do_interrupt_protected.  Verify
           against the Intel SDM before changing. */
        if (e2 & DESC_CS_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

        /* push the return CS:EIP on the current stack */
        sp = env->regs[R_ESP];
        if (!(env->segs[R_SS].flags & DESC_B_MASK))
            sp &= 0xffff;
        ssp = env->segs[R_SS].base + sp;
        if (shift) {
            ssp -= 4;
            stl(ssp, env->segs[R_CS].selector);
            ssp -= 4;
            stl(ssp, next_eip);
        } else {
            ssp -= 2;
            stw(ssp, env->segs[R_CS].selector);
            ssp -= 2;
            stw(ssp, next_eip);
        }
        sp -= (4 << shift);
        
        load_seg_cache(&sc1, e1, e2);
        if (new_eip > sc1.limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* from this point, not restartable */
        if (!(env->segs[R_SS].flags & DESC_B_MASK))
            env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) | (sp & 0xffff);
        else
            env->regs[R_ESP] = sp;
        env->segs[R_CS].base = sc1.base;
        env->segs[R_CS].limit = sc1.limit;
        env->segs[R_CS].flags = sc1.flags;
        env->segs[R_CS].selector = (new_cs & 0xfffc) | cpl;
        EIP = new_eip;
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            cpu_abort(env, "task gate not supported");
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        /* gate operand size overrides the instruction's:
           0 = 286 gate (16-bit), 1 = 386 gate (32-bit) */
        shift = type >> 3;

        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
        /* target CS:offset from the gate descriptor */
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege: validate the TSS-provided stack */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            /* SS must be a writable data segment */
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            
            /* gate word count: parameters copied from the old stack */
            param_count = e2 & 0x1f;
            push_size = ((param_count * 2) + 8) << shift;

            old_esp = env->regs[R_ESP];
            old_ss = env->segs[R_SS].selector;
            if (!(env->segs[R_SS].flags & DESC_B_MASK))
                old_esp &= 0xffff;
            old_ssp = env->segs[R_SS].base + old_esp;
            
            /* XXX: from this point not restartable */
            load_seg(R_SS, ss, env->eip);

            if (!(env->segs[R_SS].flags & DESC_B_MASK))
                sp &= 0xffff;
            ssp = env->segs[R_SS].base + sp;
            if (shift) {
                /* push old SS:ESP, then copy the parameters */
                ssp -= 4;
                stl(ssp, old_ss);
                ssp -= 4;
                stl(ssp, old_esp);
                ssp -= 4 * param_count;
                for(i = 0; i < param_count; i++) {
                    val = ldl(old_ssp + i * 4);
                    stl(ssp + i * 4, val);
                }
            } else {
                ssp -= 2;
                stw(ssp, old_ss);
                ssp -= 2;
                stw(ssp, old_esp);
                ssp -= 2 * param_count;
                for(i = 0; i < param_count; i++) {
                    val = lduw(old_ssp + i * 2);
                    stw(ssp + i * 2, val);
                }
            }
        } else {
            /* to same privilege */
            if (!(env->segs[R_SS].flags & DESC_B_MASK))
                sp &= 0xffff;
            ssp = env->segs[R_SS].base + sp;
            push_size = (4 << shift);
        }

        /* push the return CS:EIP (on the new stack if switched) */
        if (shift) {
            ssp -= 4;
            stl(ssp, env->segs[R_CS].selector);
            ssp -= 4;
            stl(ssp, next_eip);
        } else {
            ssp -= 2;
            stw(ssp, env->segs[R_CS].selector);
            ssp -= 2;
            stw(ssp, next_eip);
        }

        sp -= push_size;
        load_seg(R_CS, selector, env->eip);
        /* from this point, not restartable if same privilege */
        if (!(env->segs[R_SS].flags & DESC_B_MASK))
            env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) | (sp & 0xffff);
        else
            env->regs[R_ESP] = sp;
        EIP = offset;
    }
}
1006

    
1007
/* init the segment cache in vm86 mode: base = selector << 4,
   64KB limit, no descriptor flags (real-mode style segment) */
static inline void load_seg_vm(int seg, int selector)
{
    SegmentCache *sc = &env->segs[seg];
    selector &= 0xffff;
    sc->base = (uint8_t *)(selector << 4);
    sc->selector = selector;
    sc->flags = 0;
    sc->limit = 0xffff;
}
1017

    
1018
/* real mode iret: pop EIP, CS and EFLAGS from the stack.
   'shift' is 1 for a 32 bit operand size, 0 for 16 bit. */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp;
    uint8_t *ssp;
    int eflags_mask;
    
    /* real mode stacks always address through 16 bit SP */
    sp = env->regs[R_ESP] & 0xffff;
    ssp = env->segs[R_SS].base + sp;
    if (shift == 1) {
        /* 32 bits: three dword pops */
        new_eflags = ldl(ssp + 8);
        new_cs = ldl(ssp + 4) & 0xffff;
        /* NOTE(review): EIP is truncated to 16 bits even for the 32 bit
           form -- confirm this is the intended real-mode behavior */
        new_eip = ldl(ssp) & 0xffff;
    } else {
        /* 16 bits: three word pops */
        new_eflags = lduw(ssp + 4);
        new_cs = lduw(ssp + 2);
        new_eip = lduw(ssp);
    }
    /* three pops of (2 << shift) bytes each */
    new_esp = sp + (6 << shift);
    /* only the low 16 bits of ESP are updated */
    env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) | 
        (new_esp & 0xffff);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    /* CPL0 flag update mask; a 16 bit iret leaves the high eflags alone */
    eflags_mask = FL_UPDATE_CPL0_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}
1048

    
1049
/* protected mode iret / far return.
   shift:   1 for 32 bit operand size, 0 for 16 bit
   is_iret: non-zero for IRET (EFLAGS is also popped)
   addend:  extra bytes to discard after the pop (RET n immediate) */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2;
    int cpl, dpl, rpl, eflags_mask;
    uint8_t *ssp;
    
    sp = env->regs[R_ESP];
    /* 16 bit stack segment: only SP is significant */
    if (!(env->segs[R_SS].flags & DESC_B_MASK))
        sp &= 0xffff;
    ssp = env->segs[R_SS].base + sp;
    if (shift == 1) {
        /* 32 bits */
        if (is_iret)
            new_eflags = ldl(ssp + 8);
        new_cs = ldl(ssp + 4) & 0xffff;
        new_eip = ldl(ssp);
        /* an iret with VM set in the popped EFLAGS returns to vm86 mode */
        if (is_iret && (new_eflags & VM_MASK))
            goto return_to_vm86;
    } else {
        /* 16 bits */
        if (is_iret)
            new_eflags = lduw(ssp + 4);
        new_cs = lduw(ssp + 2);
        new_eip = lduw(ssp);
    }
    /* a null CS selector faults */
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    /* target must be a code segment */
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->segs[R_CS].selector & 3;
    rpl = new_cs & 3; 
    /* a return may never gain privilege */
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    /* NOTE(review): DESC_CS_MASK was already required above, so this test
       is always true; a conforming-code-segment check may have been
       intended here -- confirm */
    if (e2 & DESC_CS_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
    
    if (rpl == cpl) {
        /* return to same privilege level */
        load_seg(R_CS, new_cs, env->eip);
        /* discard EIP/CS (+ EFLAGS for iret) + 'addend' extra bytes */
        new_esp = sp + (4 << shift) + ((2 * is_iret) << shift) + addend;
    } else {
        /* return to an outer (less privileged) level:
           the new SS:ESP is popped from the current stack as well */
        ssp += (4 << shift) + ((2 * is_iret) << shift) + addend;
        if (shift == 1) {
            /* 32 bits */
            new_esp = ldl(ssp);
            new_ss = ldl(ssp + 4) & 0xffff;
        } else {
            /* 16 bits */
            new_esp = lduw(ssp);
            new_ss = lduw(ssp + 2);
        }
        
        /* SS checks: RPL must match the new CPL; SS must be a present,
           writable data segment with DPL == RPL */
        if ((new_ss & 3) != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (load_segment(&e1, &e2, new_ss) != 0)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(e2 & DESC_S_MASK) ||
            (e2 & DESC_CS_MASK) ||
            !(e2 & DESC_W_MASK))
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);

        load_seg(R_CS, new_cs, env->eip);
        load_seg(R_SS, new_ss, env->eip);
    }
    /* write back ESP: the full 32 bits only for a 32 bit stack segment */
    if (env->segs[R_SS].flags & DESC_B_MASK)
        env->regs[R_ESP] = new_esp;
    else
        env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) | 
            (new_esp & 0xffff);
    env->eip = new_eip;
    if (is_iret) {
        /* only CPL0 may update all system flags; a 16 bit iret leaves the
           high half of EFLAGS alone */
        if (cpl == 0)
            eflags_mask = FL_UPDATE_CPL0_MASK;
        else
            eflags_mask = FL_UPDATE_MASK32;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    /* iret to vm86: the ring-0 stack also holds ESP, SS, ES, DS, FS, GS */
    new_esp = ldl(ssp + 12);
    new_ss = ldl(ssp + 16);
    new_es = ldl(ssp + 20);
    new_ds = ldl(ssp + 24);
    new_fs = ldl(ssp + 28);
    new_gs = ldl(ssp + 32);
    
    /* modify processor state */
    load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs);
    load_seg_vm(R_SS, new_ss);
    load_seg_vm(R_ES, new_es);
    load_seg_vm(R_DS, new_ds);
    load_seg_vm(R_FS, new_fs);
    load_seg_vm(R_GS, new_gs);
    
    env->eip = new_eip;
    env->regs[R_ESP] = new_esp;
}
1170

    
1171
/* protected mode IRET: pops EFLAGS, no immediate stack adjustment */
void helper_iret_protected(int shift)
{
    helper_ret_protected(shift, 1, 0);
}
1175

    
1176
/* protected mode far RET (n): no EFLAGS pop, 'addend' extra bytes discarded */
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
1180

    
1181
/* write T0 to control register 'reg' and apply side effects.
   Only CR0 and CR3 writes trigger an update; other registers are
   just stored. */
void helper_movl_crN_T0(int reg)
{
    env->cr[reg] = T0;
    switch(reg) {
    case 0:
        /* CR0 changes can affect protection/paging state */
        cpu_x86_update_cr0(env);
        break;
    case 3:
        /* CR3 write: new page table base */
        cpu_x86_update_cr3(env);
        break;
    }
}
1193

    
1194
/* XXX: do more */
/* write T0 to debug register 'reg'; hardware breakpoint side
   effects are not emulated yet */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}
1199

    
1200
/* INVLPG: flush the TLB entry covering 'addr' */
void helper_invlpg(unsigned int addr)
{
    cpu_x86_flush_tlb(env, addr);
}
1204

    
1205
/* rdtsc */
1206
#ifndef __i386__
1207
uint64_t emu_time;
1208
#endif
1209

    
1210
/* RDTSC: return the time stamp counter in EDX:EAX.  On an i386 host the
   real counter is used; elsewhere a monotonic software counter stands in. */
void helper_rdtsc(void)
{
    uint64_t val;
#ifdef __i386__
    /* "=A" binds the 64 bit result to the host's EDX:EAX pair */
    asm("rdtsc" : "=A" (val));
#else
    /* better than nothing: the time increases */
    val = emu_time++;
#endif
    EAX = val;
    EDX = val >> 32;
}
1222

    
1223
/* WRMSR: write EDX:EAX to the MSR selected by ECX.  Only the SYSENTER
   MSRs are implemented; the high half (EDX) is ignored for them. */
void helper_wrmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = EAX & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = EAX;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = EAX;
        break;
    default:
        /* XXX: exception ? */
        break; 
    }
}
1240

    
1241
/* RDMSR: read the MSR selected by ECX into EDX:EAX.  Only the SYSENTER
   MSRs are implemented; unknown MSRs leave EDX:EAX untouched. */
void helper_rdmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        EAX = env->sysenter_cs;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_ESP:
        EAX = env->sysenter_esp;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_EIP:
        EAX = env->sysenter_eip;
        EDX = 0;
        break;
    default:
        /* XXX: exception ? */
        break; 
    }
}
1261

    
1262
/* LSL: load the segment limit of the selector in T0 into T1.
   ZF is set on success, cleared if the descriptor cannot be loaded. */
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2;

    /* start with ZF cleared; it is only set on success */
    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    /* assemble the 20 bit limit from the two descriptor words */
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    /* granularity bit set: limit is in 4KB units */
    if (e2 & (1 << 23))
        limit = (limit << 12) | 0xfff;
    T1 = limit;
    CC_SRC |= CC_Z;
}
1277

    
1278
/* LAR: load the access rights of the selector in T0 into T1.
   ZF is set on success, cleared if the descriptor cannot be loaded. */
void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2;

    /* start with ZF cleared; it is only set on success */
    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    /* mask keeps the type/DPL/P and flag bits of the second word */
    T1 = e2 & 0x00f0ff00;
    CC_SRC |= CC_Z;
}
1290

    
1291
/* FPU helpers */
1292

    
1293
#ifndef USE_X86LDOUBLE
1294
/* load an 80 bit float from A0 and push it onto the FP stack */
void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    /* push: decrement the stack top, modulo 8 */
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}
1302

    
1303
/* store ST0 at A0 in 80 bit format (stack is not popped) */
void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, (uint8_t *)A0);
}
1307
#endif
1308

    
1309
/* BCD ops */
1310

    
1311
#define MUL10(iv) ( iv + iv + (iv << 3) )
1312

    
1313
/* FBLD: load an 18 digit packed BCD value from A0 and push it.
   Bytes 0..8 hold two decimal digits each (LSB first); byte 9 bit 7
   is the sign. */
void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    /* accumulate digits from most significant byte downwards */
    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub((uint8_t *)A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    /* sign byte */
    if (ldub((uint8_t *)A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}
1331

    
1332
/* FBSTP-style store: write ST0 at A0 as 18 digit packed BCD
   (9 digit bytes LSB first, sign in byte 9). */
void helper_fbst_ST0_A0(void)
{
    CPU86_LDouble tmp;
    int v;
    uint8_t *mem_ref, *mem_end;
    int64_t val;

    /* round to integer using the current host rounding mode */
    tmp = rint(ST0);
    val = (int64_t)tmp;
    mem_ref = (uint8_t *)A0;
    mem_end = mem_ref + 9;
    /* byte 9: sign, then work with the magnitude */
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    /* emit two BCD digits per byte until the value is exhausted */
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    /* zero-fill the remaining digit bytes */
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
1361

    
1362
void helper_f2xm1(void)
1363
{
1364
    ST0 = pow(2.0,ST0) - 1.0;
1365
}
1366

    
1367
/* FYL2X: ST1 = ST1 * log2(ST0), then pop the stack.
   For ST0 <= 0 only the status word is updated (invalid operand). */
void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;
    
    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else { 
        /* clear C3..C0, set C2; the stack is left unchanged */
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}
1381

    
1382
void helper_fptan(void)
1383
{
1384
    CPU86_LDouble fptemp;
1385

    
1386
    fptemp = ST0;
1387
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1388
        env->fpus |= 0x400;
1389
    } else {
1390
        ST0 = tan(fptemp);
1391
        fpush();
1392
        ST0 = 1.0;
1393
        env->fpus &= (~0x400);  /* C2 <-- 0 */
1394
        /* the above code is for  |arg| < 2**52 only */
1395
    }
1396
}
1397

    
1398
void helper_fpatan(void)
1399
{
1400
    CPU86_LDouble fptemp, fpsrcop;
1401

    
1402
    fpsrcop = ST1;
1403
    fptemp = ST0;
1404
    ST1 = atan2(fpsrcop,fptemp);
1405
    fpop();
1406
}
1407

    
1408
/* FXTRACT: split ST0 into its unbiased exponent (left in the old slot,
   becomes ST1 after the push) and its significand (new ST0). */
void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    /* force the exponent field to the bias so the value is the
       significand scaled into [1, 2) */
    BIASEXPONENT(temp);
    ST0 = temp.d;
}
1421

    
1422
/* FPREM1: IEEE partial remainder of ST0 / ST1 (round-to-nearest
   quotient).  When the exponent gap is small the exact remainder is
   produced and the low quotient bits are reported in C0/C1/C3;
   otherwise a partial reduction is done and C2 signals "incomplete". */
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        /* quotient fits in a double: finish in one step */
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        /* partial reduction: peel off about 2**(expdif-50) multiples */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
            floor(fpsrcop): ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
1454

    
1455
/* FPREM: x87 partial remainder of ST0 / ST1 (quotient truncated toward
   zero, unlike FPREM1's round-to-nearest).  Same completion protocol as
   FPREM1: low quotient bits in C0/C1/C3 when done, C2 set otherwise. */
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;
    
    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if ( expdif < 53 ) {
        /* quotient fits in a double: finish in one step */
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        /* partial reduction: peel off about 2**(expdif-50) multiples */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0)?
            -(floor(fabs(fpsrcop))): floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
1487

    
1488
/* FYL2XP1: ST1 = ST1 * log2(ST0 + 1), then pop.
   For ST0 + 1 <= 0 only the status word is updated. */
void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else { 
        /* clear C3..C0, set C2; the stack is left unchanged */
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}
1502

    
1503
void helper_fsqrt(void)
1504
{
1505
    CPU86_LDouble fptemp;
1506

    
1507
    fptemp = ST0;
1508
    if (fptemp<0.0) { 
1509
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
1510
        env->fpus |= 0x400;
1511
    }
1512
    ST0 = sqrt(fptemp);
1513
}
1514

    
1515
void helper_fsincos(void)
1516
{
1517
    CPU86_LDouble fptemp;
1518

    
1519
    fptemp = ST0;
1520
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1521
        env->fpus |= 0x400;
1522
    } else {
1523
        ST0 = sin(fptemp);
1524
        fpush();
1525
        ST0 = cos(fptemp);
1526
        env->fpus &= (~0x400);  /* C2 <-- 0 */
1527
        /* the above code is for  |arg| < 2**63 only */
1528
    }
1529
}
1530

    
1531
/* FRNDINT: round ST0 to an integer using the x87 rounding control */
void helper_frndint(void)
{
    CPU86_LDouble a;

    a = ST0;
#ifdef __arm__
    /* pick the FPA rounding instruction matching the x87 control word */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_DOWN:
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_UP:
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_CHOP:
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));
        break;
    }
#else
    /* NOTE(review): rint() follows the host rounding mode, which is
       assumed to track env->fpuc -- confirm */
    a = rint(a);
#endif
    ST0 = a;
}
1557

    
1558
void helper_fscale(void)
1559
{
1560
    CPU86_LDouble fpsrcop, fptemp;
1561

    
1562
    fpsrcop = 2.0;
1563
    fptemp = pow(fpsrcop,ST1);
1564
    ST0 *= fptemp;
1565
}
1566

    
1567
void helper_fsin(void)
1568
{
1569
    CPU86_LDouble fptemp;
1570

    
1571
    fptemp = ST0;
1572
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1573
        env->fpus |= 0x400;
1574
    } else {
1575
        ST0 = sin(fptemp);
1576
        env->fpus &= (~0x400);  /* C2 <-- 0 */
1577
        /* the above code is for  |arg| < 2**53 only */
1578
    }
1579
}
1580

    
1581
void helper_fcos(void)
1582
{
1583
    CPU86_LDouble fptemp;
1584

    
1585
    fptemp = ST0;
1586
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1587
        env->fpus |= 0x400;
1588
    } else {
1589
        ST0 = cos(fptemp);
1590
        env->fpus &= (~0x400);  /* C2 <-- 0 */
1591
        /* the above code is for  |arg5 < 2**63 only */
1592
    }
1593
}
1594

    
1595
/* FXAM: classify ST0 and report the class in the C3..C0 condition bits
   (C0=0x100, C1=0x200, C2=0x400, C3=0x4000). */
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    /* C1 reports the sign */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        /* all-ones exponent: infinity or NaN */
        if (MANTD(temp) == 0)
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        /* zero exponent: zero or denormal */
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        /* normal finite value */
        env->fpus |= 0x400;
    }
}
1621

    
1622
/* FSTENV: store the FPU environment (control, status and tag words) at
   'ptr'.  'data32' selects the 32 bit (28 byte) layout over the 16 bit
   (14 byte) one.  Instruction/operand pointers are not tracked and are
   stored as zero. */
void helper_fstenv(uint8_t *ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    /* merge the stack top pointer into bits 11..13 of the status word */
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    /* rebuild the x87 tag word: 2 bits per register
       (00 valid, 01 zero, 10 special, 11 empty) */
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i];
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
            /* otherwise the tag stays 00: valid */
        }
    }
    if (data32) {
        /* 32 bit: each word stored as a dword, pointers zeroed */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0);
        stl(ptr + 16, 0);
        stl(ptr + 20, 0);
        stl(ptr + 24, 0);
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}
1671

    
1672
/* FLDENV: reload the FPU environment (control, status and tag words)
   from 'ptr'.  'data32' selects the 32 bit (28 byte) layout over the
   16 bit (14 byte) one. */
void helper_fldenv(uint8_t *ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    /* split the stack top pointer back out of the status word */
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    /* restore all 8 tag entries (2 bits each); only 11 means empty.
       Bug fix: the loop previously stopped at i < 7, leaving fptags[7]
       stale after FLDENV/FRSTOR. */
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}
1693

    
1694
/* FSAVE: store the FPU environment and all 8 data registers at 'ptr',
   then reinitialize the FPU as FNINIT does.  'data32' selects the
   32 bit environment layout. */
void helper_fsave(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    /* the environment occupies 14 bytes (16 bit) or 28 bytes (32 bit) */
    ptr += (14 << data32);
    /* registers follow in 80 bit format, ST(0) first */
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
#ifdef USE_X86LDOUBLE
        *(long double *)ptr = tmp;
#else
        helper_fstt(tmp, ptr);
#endif        
        ptr += 10;
    }

    /* fninit: default control word, clear status, all registers empty */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    for(i = 0; i < 8; i++)
        env->fptags[i] = 1;
}
1725

    
1726
/* FRSTOR: reload the FPU environment, then all 8 data registers,
   from the image stored at 'ptr' (the FSAVE layout). */
void helper_frstor(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    /* skip the 14 or 28 byte environment */
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
#ifdef USE_X86LDOUBLE
        tmp = *(long double *)ptr;
#else
        tmp = helper_fldt(ptr);
#endif        
        ST(i) = tmp;
        ptr += 10;  /* registers are stored in 80 bit format */
    }
}
1744