Statistics
| Branch: | Revision:

root / helper-i386.c @ a363e34c

History | View | Annotate | Download (37.5 kB)

1
/*
2
 *  i386 helpers
3
 * 
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "exec-i386.h"
21

    
22
/* PF lookup table: maps each possible low result byte to CC_P when the
   byte has an even number of set bits, 0 otherwise. */
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
56

    
57
/* modulo 17 table: rotate count for 16 bit RCL/RCR (17 positions:
   16 data bits + carry) without a division */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7, 
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
64

    
65
/* modulo 9 table: rotate count for 8 bit RCL/RCR (9 positions:
   8 data bits + carry) without a division */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7, 
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5, 
    6, 7, 8, 0, 1, 2, 3, 4,
};
72

    
73
/* constants loaded by the FLDx family of instructions
   (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T) */
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
83
    
84
/* thread support */
85

    
86
/* single global lock serializing CPU emulation between threads */
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
87

    
88
/* acquire the global CPU emulation lock */
void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}
92

    
93
/* release the global CPU emulation lock */
void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
97

    
98
/* Abort execution of the current translated block and return to the
   main CPU loop via longjmp.  Guest registers that are mapped to host
   registers (the reg_Exx macros) must be written back to env first,
   since longjmp restores the host registers. */
void cpu_loop_exit(void)
{
    /* NOTE: the register at this point must be saved by hand because
       longjmp restore them */
#ifdef reg_EAX
    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_ECX
    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_EBX
    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ESP
    env->regs[R_ESP] = ESP;
#endif
#ifdef reg_EBP
    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESI
    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
    env->regs[R_EDI] = EDI;
#endif
    longjmp(env->jmp_env, 1);
}
128

    
129
/* Read the inner-level stack pointer (SS:ESP) for privilege level dpl
   from the current TSS.  Supports both 16 bit (286) and 32 bit (386)
   TSS formats; raises #TS if the entry lies outside the TSS limit. */
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr, 
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;
    
#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    /* TSS descriptor types are 1/3 (16 bit) and 9/11 (32 bit) */
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    /* shift is 0 for a 16 bit TSS, 1 for a 32 bit TSS */
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw(env->tr.base + index);
        *ss_ptr = lduw(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl(env->tr.base + index);
        *ss_ptr = lduw(env->tr.base + index + 4);
    }
}
163

    
164
/* return non zero if error */
165
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
166
                               int selector)
167
{
168
    SegmentCache *dt;
169
    int index;
170
    uint8_t *ptr;
171

    
172
    if (selector & 0x4)
173
        dt = &env->ldt;
174
    else
175
        dt = &env->gdt;
176
    index = selector & ~7;
177
    if ((index + 7) > dt->limit)
178
        return -1;
179
    ptr = dt->base + index;
180
    *e1_ptr = ldl(ptr);
181
    *e2_ptr = ldl(ptr + 4);
182
    return 0;
183
}
184
                                     
185

    
186
/* Protected mode interrupt/exception entry.
   intno      : vector number
   is_int     : non-zero when raised by an INT instruction (privilege
                check against the gate DPL applies)
   error_code : pushed on the stack when the vector has one
   next_eip   : return EIP for software interrupts
   Reads the gate from the IDT, validates the target code segment,
   switches to the inner stack when entering a more privileged level,
   then pushes the return frame and jumps to the handler. */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                      unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int type, dpl, cpl, selector, ss_dpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
    uint32_t old_cs, old_ss, old_esp, old_eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl(ptr);
    e2 = ldl(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        cpu_abort(env, "task gate not supported");
        break;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (env->eflags & VM_MASK)
        cpl = 3;
    else
        cpl = env->segs[R_CS].selector & 3;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    /* validate the handler's code segment descriptor */
    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege: fetch and validate SS:ESP from the TSS */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        new_stack = 0;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
    }

    /* shift is 0 for 286 gates (16 bit pushes), 1 for 386 gates */
    shift = type >> 3;
    has_error_code = 0;
    if (!is_int) {
        /* vectors that push an error code: #DF #TS #NP #SS #GP #PF #AC */
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;

    /* XXX: check that enough room is available */
    if (new_stack) {
        old_esp = env->regs[R_ESP];
        old_ss = env->segs[R_SS].selector;
        load_seg(R_SS, ss, env->eip);
    } else {
        old_esp = 0;
        old_ss = 0;
        esp = env->regs[R_ESP];
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    load_seg(R_CS, selector, env->eip);
    env->eip = offset;
    env->regs[R_ESP] = esp - push_size;
    ssp = env->segs[R_SS].base + esp;
    if (shift == 1) {
        int old_eflags;
        if (env->eflags & VM_MASK) {
            /* coming from vm86: push the data segment selectors too */
            ssp -= 4;
            stl(ssp, env->segs[R_GS].selector);
            ssp -= 4;
            stl(ssp, env->segs[R_FS].selector);
            ssp -= 4;
            stl(ssp, env->segs[R_DS].selector);
            ssp -= 4;
            stl(ssp, env->segs[R_ES].selector);
        }
        if (new_stack) {
            ssp -= 4;
            stl(ssp, old_ss);
            ssp -= 4;
            stl(ssp, old_esp);
        }
        ssp -= 4;
        old_eflags = compute_eflags();
        stl(ssp, old_eflags);
        ssp -= 4;
        stl(ssp, old_cs);
        ssp -= 4;
        stl(ssp, old_eip);
        if (has_error_code) {
            ssp -= 4;
            stl(ssp, error_code);
        }
    } else {
        if (new_stack) {
            ssp -= 2;
            stw(ssp, old_ss);
            ssp -= 2;
            stw(ssp, old_esp);
        }
        ssp -= 2;
        stw(ssp, compute_eflags());
        ssp -= 2;
        stw(ssp, old_cs);
        ssp -= 2;
        stw(ssp, old_eip);
        if (has_error_code) {
            ssp -= 2;
            stw(ssp, error_code);
        }
    }
    
    /* interrupt gate clear IF mask (trap gates, type odd, keep IF) */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
363

    
364
/* Real mode interrupt entry: read CS:IP from the 4-byte IVT entry,
   push FLAGS/CS/IP (16 bit) on the stack and jump to the handler. */
static void do_interrupt_real(int intno, int is_int, int error_code,
                                 unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw(ptr);
    selector = lduw(ptr + 2);
    esp = env->regs[R_ESP] & 0xffff;
    ssp = env->segs[R_SS].base + esp;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    ssp -= 2;
    stw(ssp, compute_eflags());
    ssp -= 2;
    stw(ssp, old_cs);
    ssp -= 2;
    stw(ssp, old_eip);
    esp -= 6;
    
    /* update processor state; only the low 16 bits of ESP change */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (uint8_t *)(selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
403

    
404
/* fake user mode interrupt */
405
void do_interrupt_user(int intno, int is_int, int error_code, 
406
                       unsigned int next_eip)
407
{
408
    SegmentCache *dt;
409
    uint8_t *ptr;
410
    int dpl, cpl;
411
    uint32_t e2;
412

    
413
    dt = &env->idt;
414
    ptr = dt->base + (intno * 8);
415
    e2 = ldl(ptr + 4);
416
    
417
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
418
    cpl = 3;
419
    /* check privledge if software int */
420
    if (is_int && dpl < cpl)
421
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
422

    
423
    /* Since we emulate only user space, we cannot do more than
424
       exiting the emulation with the suitable exception and error
425
       code */
426
    if (is_int)
427
        EIP = next_eip;
428
}
429

    
430
/*
431
 * Begin execution of an interrupt. is_int is TRUE if coming from
432
 * the int instruction. next_eip is the EIP value AFTER the interrupt
433
 * instruction. It is only relevant if is_int is TRUE.  
434
 */
435
void do_interrupt(int intno, int is_int, int error_code, 
436
                  unsigned int next_eip)
437
{
438
    if (env->cr[0] & CR0_PE_MASK) {
439
        do_interrupt_protected(intno, is_int, error_code, next_eip);
440
    } else {
441
        do_interrupt_real(intno, is_int, error_code, next_eip);
442
    }
443
}
444

    
445
/*
446
 * Signal an interruption. It is executed in the main CPU loop.
447
 * is_int is TRUE if coming from the int instruction. next_eip is the
448
 * EIP value AFTER the interrupt instruction. It is only relevant if
449
 * is_int is TRUE.  
450
 */
451
/* Record a pending interrupt/exception in env and unwind to the main
   CPU loop, which performs the actual delivery. */
void raise_interrupt(int intno, int is_int, int error_code, 
                     unsigned int next_eip)
{
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = next_eip;
    cpu_loop_exit();  /* does not return */
}
460

    
461
/* shortcuts to generate exceptions */
462
/* raise a hardware exception that carries an error code */
void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
466

    
467
/* raise a hardware exception without an error code */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
471

    
472
#ifdef BUGGY_GCC_DIV64
473
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
474
   call it from another function */
475
/* 64/32 unsigned division out of line (works around a gcc 2.95.4
   PowerPC issue with __udivdi3).  Stores the (truncated) quotient
   through q_ptr and returns the remainder. */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    uint64_t quot = num / den;
    uint64_t rem = num % den;

    *q_ptr = quot;
    return rem;
}
480

    
481
/* Signed counterpart of div64: 64/32 division with truncation toward
   zero.  Stores the quotient through q_ptr and returns the remainder. */
int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
{
    int64_t quot = num / den;
    int64_t rem = num % den;

    *q_ptr = quot;
    return rem;
}
486
#endif
487

    
488
/* 32 bit unsigned DIV: EAX <- EDX:EAX / T0, EDX <- EDX:EAX % T0.
   eip is the address of the DIV instruction so it can be restarted
   after the fault.  Raises #DE on a zero divisor and also, as the
   hardware does, when the quotient does not fit in 32 bits. */
void helper_divl_EAX_T0(uint32_t eip)
{
    unsigned int den, q, r;
    uint64_t num;
    
    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
    /* for unsigned division the quotient fits in 32 bits iff the high
       half of the dividend is strictly smaller than the divisor;
       otherwise the CPU raises #DE (divide error) */
    if ((num >> 32) >= den) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}
508

    
509
/* 32 bit signed IDIV: EAX <- EDX:EAX / T0, EDX <- EDX:EAX % T0.
   eip is the address of the IDIV instruction for fault restart.
   NOTE(review): the hardware also raises #DE when the quotient does
   not fit in int32 (e.g. INT64_MIN / -1); that case is not checked
   here and INT64_MIN / -1 is undefined behavior in host C — confirm
   whether callers can produce it. */
void helper_idivl_EAX_T0(uint32_t eip)
{
    int den, q, r;
    int64_t num;
    
    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}
529

    
530
/* CMPXCHG8B: compare the 64 bit value at [A0] with EDX:EAX; if equal,
   store ECX:EBX there and set ZF, otherwise load EDX:EAX from memory
   and clear ZF.  Other flags are preserved via CC_SRC. */
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq((uint8_t *)A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
547

    
548
/* We simulate a pre-MMX pentium as in valgrind */
/* CPUID.1:EDX feature flag bit positions */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
/* ... */
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)
569

    
570
void helper_cpuid(void)
571
{
572
    if (EAX == 0) {
573
        EAX = 1; /* max EAX index supported */
574
        EBX = 0x756e6547;
575
        ECX = 0x6c65746e;
576
        EDX = 0x49656e69;
577
    } else if (EAX == 1) {
578
        int family, model, stepping;
579
        /* EAX = 1 info */
580
#if 0
581
        /* pentium 75-200 */
582
        family = 5;
583
        model = 2;
584
        stepping = 11;
585
#else
586
        /* pentium pro */
587
        family = 6;
588
        model = 1;
589
        stepping = 3;
590
#endif
591
        EAX = (family << 8) | (model << 4) | stepping;
592
        EBX = 0;
593
        ECX = 0;
594
        EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
595
            CPUID_TSC | CPUID_MSR | CPUID_MCE |
596
            CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
597
    }
598
}
599

    
600
/* Decode the two words of a segment descriptor into a SegmentCache:
   base is scattered across both words, the 20 bit limit is scaled to
   4K pages when the granularity bit is set, flags keep e2 as-is. */
static inline void load_seg_cache(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    uint32_t limit = (e1 & 0xffff) | (e2 & 0x000f0000);

    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    sc->base = (void *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    sc->limit = limit;
    sc->flags = e2;
}
608

    
609
/* LLDT: load the LDT register from the selector in T0.  A null
   selector installs an empty LDT; otherwise the descriptor must come
   from the GDT and have system type 2 (LDT). */
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index;
    uint8_t *ptr;
    
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = NULL;
        env->ldt.limit = 0;
    } else {
        /* an LDT selector must reference the GDT (TI bit clear) */
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl(ptr);
        e2 = ldl(ptr + 4);
        /* must be a system descriptor of type 2 (LDT) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache(&env->ldt, e1, e2);
    }
    env->ldt.selector = selector;
}
640

    
641
/* LTR: load the task register from the selector in T0.  A null
   selector installs an empty TSS; otherwise the descriptor must come
   from the GDT, be an available TSS (type 2 or 9), and is marked busy
   in the GDT afterwards. */
void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type;
    uint8_t *ptr;
    
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = NULL;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        /* a TSS selector must reference the GDT (TI bit clear) */
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl(ptr);
        e2 = ldl(ptr + 4);
        /* type 2 = 16 bit available TSS, type 9 = 32 bit available TSS */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) || 
            (type != 2 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache(&env->tr, e1, e2);
        e2 |= 0x00000200; /* set the busy bit */
        stl(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
677

    
678
/* Load a segment register in protected (non-VM86) mode, performing
   the descriptor validity checks.  cur_eip is restored into EIP
   before any fault is raised so the instruction can be restarted. */
void load_seg(int seg_reg, int selector, unsigned int cur_eip)
{
    SegmentCache *sc;
    uint32_t e1, e2;
    
    sc = &env->segs[seg_reg];
    if ((selector & 0xfffc) == 0) {
        /* null selector case: allowed for data segments, #GP for SS */
        if (seg_reg == R_SS) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, 0);
        } else {
            /* XXX: each access should trigger an exception */
            sc->base = NULL;
            sc->limit = 0;
            sc->flags = 0;
        }
    } else {
        if (load_segment(&e1, &e2, selector) != 0) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }
        /* must be a non-system segment and not execute-only code */
        if (!(e2 & DESC_S_MASK) ||
            (e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }

        if (seg_reg == R_SS) {
            /* NOTE(review): this only rejects non-writable data
               segments; a readable code segment passes the check but
               the hardware requires SS to be writable data — confirm */
            if ((e2 & (DESC_CS_MASK | DESC_W_MASK)) == 0) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        } else {
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        /* not-present segment: #SS for the stack, #NP otherwise */
        if (!(e2 & DESC_P_MASK)) {
            EIP = cur_eip;
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }
        load_seg_cache(sc, e1, e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", 
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
    sc->selector = selector;
}
734

    
735
/* protected mode jump */
736
void jmp_seg(int selector, unsigned int new_eip)
737
{
738
    SegmentCache sc1;
739
    uint32_t e1, e2, cpl, dpl, rpl;
740

    
741
    if ((selector & 0xfffc) == 0) {
742
        raise_exception_err(EXCP0D_GPF, 0);
743
    }
744

    
745
    if (load_segment(&e1, &e2, selector) != 0)
746
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
747
    cpl = env->segs[R_CS].selector & 3;
748
    if (e2 & DESC_S_MASK) {
749
        if (!(e2 & DESC_CS_MASK))
750
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
751
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
752
        if (e2 & DESC_CS_MASK) {
753
            /* conforming code segment */
754
            if (dpl > cpl)
755
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
756
        } else {
757
            /* non conforming code segment */
758
            rpl = selector & 3;
759
            if (rpl > cpl)
760
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
761
            if (dpl != cpl)
762
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
763
        }
764
        if (!(e2 & DESC_P_MASK))
765
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
766
        load_seg_cache(&sc1, e1, e2);
767
        if (new_eip > sc1.limit)
768
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
769
        env->segs[R_CS].base = sc1.base;
770
        env->segs[R_CS].limit = sc1.limit;
771
        env->segs[R_CS].flags = sc1.flags;
772
        env->segs[R_CS].selector = (selector & 0xfffc) | cpl;
773
        EIP = new_eip;
774
    } else {
775
        cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x", 
776
                  selector, new_eip);
777
    }
778
}
779

    
780
/* init the segment cache in vm86 mode */
781
static inline void load_seg_vm(int seg, int selector)
782
{
783
    SegmentCache *sc = &env->segs[seg];
784
    selector &= 0xffff;
785
    sc->base = (uint8_t *)(selector << 4);
786
    sc->selector = selector;
787
    sc->flags = 0;
788
    sc->limit = 0xffff;
789
}
790

    
791
/* Protected mode IRET.  shift is 0 for a 16 bit and 1 for a 32 bit
   operand size.  Handles return to the same privilege level, return
   to an outer (less privileged) level with a stack switch, and return
   to vm86 mode when the popped EFLAGS has VM set. */
void helper_iret_protected(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2;
    int cpl, dpl, rpl, eflags_mask;
    uint8_t *ssp;
    
    sp = env->regs[R_ESP];
    /* 16 bit stack segment: only the low 16 bits of ESP are used */
    if (!(env->segs[R_SS].flags & DESC_B_MASK))
        sp &= 0xffff;
    ssp = env->segs[R_SS].base + sp;
    if (shift == 1) {
        /* 32 bits */
        new_eflags = ldl(ssp + 8);
        new_cs = ldl(ssp + 4) & 0xffff;
        new_eip = ldl(ssp);
        if (new_eflags & VM_MASK)
            goto return_to_vm86;
    } else {
        /* 16 bits */
        new_eflags = lduw(ssp + 4);
        new_cs = lduw(ssp + 2);
        new_eip = lduw(ssp);
    }
    /* validate the return code segment */
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->segs[R_CS].selector & 3;
    rpl = new_cs & 3; 
    /* IRET may not return to a more privileged level */
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_CS_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
    
    if (rpl == cpl) {
        /* return to same privilege level */
        load_seg(R_CS, new_cs, env->eip);
        new_esp = sp + (6 << shift);
    } else {
        /* return to outer privilege level: pop SS:ESP too */
        if (shift == 1) {
            /* 32 bits */
            new_esp = ldl(ssp + 12);
            new_ss = ldl(ssp + 16) & 0xffff;
        } else {
            /* 16 bits */
            new_esp = lduw(ssp + 6);
            new_ss = lduw(ssp + 8);
        }
        
        /* validate the new stack segment */
        if ((new_ss & 3) != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (load_segment(&e1, &e2, new_ss) != 0)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(e2 & DESC_S_MASK) ||
            (e2 & DESC_CS_MASK) ||
            !(e2 & DESC_W_MASK))
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);

        load_seg(R_CS, new_cs, env->eip);
        load_seg(R_SS, new_ss, env->eip);
    }
    if (env->segs[R_SS].flags & DESC_B_MASK)
        env->regs[R_ESP] = new_esp;
    else
        env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) | 
            (new_esp & 0xffff);
    env->eip = new_eip;
    /* which EFLAGS bits may be restored depends on CPL */
    if (cpl == 0)
        eflags_mask = FL_UPDATE_CPL0_MASK;
    else
        eflags_mask = FL_UPDATE_MASK32;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    return;

 return_to_vm86:
    /* pop the full vm86 frame: ESP, SS and the four data segments */
    new_esp = ldl(ssp + 12);
    new_ss = ldl(ssp + 16);
    new_es = ldl(ssp + 20);
    new_ds = ldl(ssp + 24);
    new_fs = ldl(ssp + 28);
    new_gs = ldl(ssp + 32);
    
    /* modify processor state */
    load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs);
    load_seg_vm(R_SS, new_ss);
    load_seg_vm(R_ES, new_es);
    load_seg_vm(R_DS, new_ds);
    load_seg_vm(R_FS, new_fs);
    load_seg_vm(R_GS, new_gs);
    
    env->eip = new_eip;
    env->regs[R_ESP] = new_esp;
}
907

    
908
void helper_movl_crN_T0(int reg)
909
{
910
    env->cr[reg] = T0;
911
    switch(reg) {
912
    case 0:
913
        cpu_x86_update_cr0(env);
914
        break;
915
    case 3:
916
        cpu_x86_update_cr3(env);
917
        break;
918
    }
919
}
920

    
921
/* MOV DRn, T0: only stores the value.  XXX: do more (debug traps are
   not implemented) */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}
926

    
927
/* INVLPG: flush the software TLB entry for the given linear address */
void helper_invlpg(unsigned int addr)
{
    cpu_x86_flush_tlb(env, addr);
}
931

    
932
/* rdtsc */
#ifndef __i386__
/* monotonic counter used as a fake TSC on non-x86 hosts */
uint64_t emu_time;
#endif
936

    
937
/* RDTSC: return the time stamp counter in EDX:EAX.  On an x86 host
   the real counter is used; elsewhere a software counter that simply
   increments on each call. */
void helper_rdtsc(void)
{
    uint64_t val;
#ifdef __i386__
    asm("rdtsc" : "=A" (val));
#else
    /* better than nothing: the time increases */
    val = emu_time++;
#endif
    EAX = val;
    EDX = val >> 32;
}
949

    
950
/* LSL: load the (granularity-adjusted) limit of the descriptor
   selected by T0 into T1 and set ZF; on a bad selector ZF is left
   clear and T1 unchanged. */
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2;

    /* start with ZF clear; set it only on success */
    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    /* bit 23 of e2 is the granularity bit: limit in 4K pages */
    if (e2 & (1 << 23))
        limit = (limit << 12) | 0xfff;
    T1 = limit;
    CC_SRC |= CC_Z;
}
965

    
966
/* LAR: load the access rights bytes of the descriptor selected by T0
   into T1 and set ZF; on a bad selector ZF is left clear and T1
   unchanged. */
void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2;

    /* start with ZF clear; set it only on success */
    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    T1 = e2 & 0x00f0ff00;
    CC_SRC |= CC_Z;
}
978

    
979
/* FPU helpers */
980

    
981
#ifndef USE_X86LDOUBLE
/* 80 bit load/store helpers, needed only when the host FPU does not
   use the x86 long double format natively */

/* FLD tbyte: load an 80 bit float from [A0] into ST0 */
void helper_fldt_ST0_A0(void)
{
    ST0 = helper_fldt((uint8_t *)A0);
}

/* FSTP tbyte: store ST0 as an 80 bit float at [A0] */
void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, (uint8_t *)A0);
}
#endif
992

    
993
/* BCD ops */
994

    
995
/* MUL10(x) == 10 * x, as 2x + 8x.  Arguments are now fully
   parenthesized so low-precedence expressions (e.g. a | b) expand
   correctly; note the argument is still evaluated three times, so it
   must be side-effect free (true for all uses below). */
#define MUL10(iv) ( (iv) + (iv) + ((iv) << 3) )
996

    
997
/* FBLD: load a packed BCD integer from memory at A0 into ST0.
   Bytes 0-8 carry two decimal digits each (byte 0 = least significant
   pair, high nibble = odd digit); bit 7 of byte 9 is the sign.
   NOTE(review): the two leading checks silently reject operands whose
   top three digits are non-zero instead of raising an exception
   (see the XXX comments). */
void helper_fbld_ST0_A0(void)
{
    uint8_t *seg;
    CPU86_LDouble fpsrcop;
    int m32i;
    unsigned int v;

    /* in this code, seg/m32i will be used as temporary ptr/int */
    seg = (uint8_t *)A0 + 8;
    v = ldub(seg--);
    /* XXX: raise exception */
    if (v != 0)
        return;
    v = ldub(seg--);
    /* XXX: raise exception */
    if ((v & 0xf0) != 0)
        return;
    /* first group: accumulate digits d14..d8 into an int, then scale */
    m32i = v;  /* <-- d14 */
    v = ldub(seg--);
    m32i = MUL10(m32i) + (v >> 4);  /* <-- val * 10 + d13 */
    m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d12 */
    v = ldub(seg--);
    m32i = MUL10(m32i) + (v >> 4);  /* <-- val * 10 + d11 */
    m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d10 */
    v = ldub(seg--);
    m32i = MUL10(m32i) + (v >> 4);  /* <-- val * 10 + d9 */
    m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d8 */
    fpsrcop = ((CPU86_LDouble)m32i) * 100000000.0;

    /* second group: digits d7..d0 */
    v = ldub(seg--);
    m32i = (v >> 4);  /* <-- d7 */
    m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d6 */
    v = ldub(seg--);
    m32i = MUL10(m32i) + (v >> 4);  /* <-- val * 10 + d5 */
    m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d4 */
    v = ldub(seg--);
    m32i = MUL10(m32i) + (v >> 4);  /* <-- val * 10 + d3 */
    m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d2 */
    v = ldub(seg);
    m32i = MUL10(m32i) + (v >> 4);  /* <-- val * 10 + d1 */
    m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d0 */
    fpsrcop += ((CPU86_LDouble)m32i);
    /* seg now points back at A0, so seg+9 is the sign byte */
    if ( ldub(seg+9) & 0x80 )
        fpsrcop = -fpsrcop;
    ST0 = fpsrcop;
}
1043

    
1044
/* FBST: store ST0 at A0 as a packed BCD integer, sign in bit 7 of
   the top byte.
   NOTE(review): only digit bytes 0-7 (16 digits) are generated before
   the sign word is stored at offset 8, so digits 17-18 of large values
   are dropped -- confirm against the 10-byte BCD layout. */
void helper_fbst_ST0_A0(void)
{
    CPU86_LDouble fptemp;
    CPU86_LDouble fpsrcop;
    int v;
    uint8_t *mem_ref, *mem_end;

    /* round the operand to an integer first */
    fpsrcop = rint(ST0);
    mem_ref = (uint8_t *)A0;
    mem_end = mem_ref + 8;
    /* sign word written at +8 (also zeroes digit byte 8) */
    if ( fpsrcop < 0.0 ) {
        stw(mem_end, 0x8000);
        fpsrcop = -fpsrcop;
    } else {
        stw(mem_end, 0x0000);
    }
    /* emit two decimal digits per byte, least significant pair first */
    while (mem_ref < mem_end) {
        if (fpsrcop == 0.0)
            break;
        fptemp = floor(fpsrcop/10.0);
        v = ((int)(fpsrcop - fptemp*10.0));   /* low nibble digit */
        if  (fptemp == 0.0)  { 
            stb(mem_ref++, v); 
            break; 
        }
        fpsrcop = fptemp;
        fptemp = floor(fpsrcop/10.0);
        v |= (((int)(fpsrcop - fptemp*10.0)) << 4);  /* high nibble digit */
        stb(mem_ref++, v);
        fpsrcop = fptemp;
    }
    /* zero-fill the remaining digit bytes */
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
1079

    
1080
void helper_f2xm1(void)
1081
{
1082
    ST0 = pow(2.0,ST0) - 1.0;
1083
}
1084

    
1085
void helper_fyl2x(void)
1086
{
1087
    CPU86_LDouble fptemp;
1088
    
1089
    fptemp = ST0;
1090
    if (fptemp>0.0){
1091
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
1092
        ST1 *= fptemp;
1093
        fpop();
1094
    } else { 
1095
        env->fpus &= (~0x4700);
1096
        env->fpus |= 0x400;
1097
    }
1098
}
1099

    
1100
void helper_fptan(void)
1101
{
1102
    CPU86_LDouble fptemp;
1103

    
1104
    fptemp = ST0;
1105
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1106
        env->fpus |= 0x400;
1107
    } else {
1108
        ST0 = tan(fptemp);
1109
        fpush();
1110
        ST0 = 1.0;
1111
        env->fpus &= (~0x400);  /* C2 <-- 0 */
1112
        /* the above code is for  |arg| < 2**52 only */
1113
    }
1114
}
1115

    
1116
void helper_fpatan(void)
1117
{
1118
    CPU86_LDouble fptemp, fpsrcop;
1119

    
1120
    fpsrcop = ST1;
1121
    fptemp = ST0;
1122
    ST1 = atan2(fpsrcop,fptemp);
1123
    fpop();
1124
}
1125

    
1126
/* FXTRACT: decompose ST0. ST0 receives the unbiased exponent, then
   the significand (same value with its exponent forced to the bias)
   is pushed on top of the stack. */
void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    /* remove the encoding bias from the raw exponent field */
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    /* force temp's exponent to the bias, leaving just the mantissa */
    BIASEXPONENT(temp);
    ST0 = temp.d;
}
1139

    
1140
/* FPREM1: IEEE-style partial remainder, ST0 = ST0 - ST1 * q.
   When the raw exponents differ by less than 53 the reduction
   completes (C2=0) and the low three quotient bits are reported in
   C0/C1/C3; otherwise only a partial reduction is done and C2=1 so
   the caller can iterate.
   NOTE(review): the expdif < 53 path chops the quotient toward zero,
   exactly like FPREM below; FPREM1 is specified to round the quotient
   to nearest -- confirm. */
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    /* compare magnitudes via the raw biased exponent fields */
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        /* shrink the exponent difference by roughly 50 bits per step */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
            floor(fpsrcop): ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
1172

    
1173
/* FPREM: partial remainder, ST0 = ST0 - ST1 * q with the quotient q
   obtained by chopping (round toward zero). When the raw exponents
   differ by less than 53 the reduction completes (C2=0) with the low
   three quotient bits in C0/C1/C3; otherwise a partial reduction is
   performed and C2=1 so the instruction can be repeated. */
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;
    
    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    /* compare magnitudes via the raw biased exponent fields */
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if ( expdif < 53 ) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);  /* chop */
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        /* shrink the exponent difference by roughly 50 bits per step */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0)?
            -(floor(fabs(fpsrcop))): floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
1205

    
1206
void helper_fyl2xp1(void)
1207
{
1208
    CPU86_LDouble fptemp;
1209

    
1210
    fptemp = ST0;
1211
    if ((fptemp+1.0)>0.0) {
1212
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
1213
        ST1 *= fptemp;
1214
        fpop();
1215
    } else { 
1216
        env->fpus &= (~0x4700);
1217
        env->fpus |= 0x400;
1218
    }
1219
}
1220

    
1221
void helper_fsqrt(void)
1222
{
1223
    CPU86_LDouble fptemp;
1224

    
1225
    fptemp = ST0;
1226
    if (fptemp<0.0) { 
1227
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
1228
        env->fpus |= 0x400;
1229
    }
1230
    ST0 = sqrt(fptemp);
1231
}
1232

    
1233
void helper_fsincos(void)
1234
{
1235
    CPU86_LDouble fptemp;
1236

    
1237
    fptemp = ST0;
1238
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1239
        env->fpus |= 0x400;
1240
    } else {
1241
        ST0 = sin(fptemp);
1242
        fpush();
1243
        ST0 = cos(fptemp);
1244
        env->fpus &= (~0x400);  /* C2 <-- 0 */
1245
        /* the above code is for  |arg| < 2**63 only */
1246
    }
1247
}
1248

    
1249
/* FRNDINT: round ST0 to an integer according to the rounding-control
   bits of the FPU control word. */
void helper_frndint(void)
{
    CPU86_LDouble a;

    a = ST0;
#ifdef __arm__
    /* the ARM FPA has one round-to-integer instruction per rounding
       mode, so dispatch on the RC field of the control word */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_DOWN:
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_UP:
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_CHOP:
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));
        break;
    }
#else
    /* rint() follows the host rounding mode -- assumes it is kept in
       sync with the guest RC bits elsewhere; TODO confirm */
    a = rint(a);
#endif
    ST0 = a;
}
1275

    
1276
void helper_fscale(void)
1277
{
1278
    CPU86_LDouble fpsrcop, fptemp;
1279

    
1280
    fpsrcop = 2.0;
1281
    fptemp = pow(fpsrcop,ST1);
1282
    ST0 *= fptemp;
1283
}
1284

    
1285
void helper_fsin(void)
1286
{
1287
    CPU86_LDouble fptemp;
1288

    
1289
    fptemp = ST0;
1290
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1291
        env->fpus |= 0x400;
1292
    } else {
1293
        ST0 = sin(fptemp);
1294
        env->fpus &= (~0x400);  /* C2 <-- 0 */
1295
        /* the above code is for  |arg| < 2**53 only */
1296
    }
1297
}
1298

    
1299
void helper_fcos(void)
1300
{
1301
    CPU86_LDouble fptemp;
1302

    
1303
    fptemp = ST0;
1304
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1305
        env->fpus |= 0x400;
1306
    } else {
1307
        ST0 = cos(fptemp);
1308
        env->fpus &= (~0x400);  /* C2 <-- 0 */
1309
        /* the above code is for  |arg5 < 2**63 only */
1310
    }
1311
}
1312

    
1313
/* FXAM: classify ST0 into condition code bits C3,C2,C0; C1 receives
   the sign bit of the operand. */
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* classify on the raw exponent field */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;  /* normal finite number */
    }
}
1339

    
1340
/* FSTENV/FNSTENV: store the FPU environment (control, status and tag
   words) at ptr, using the 32-bit or 16-bit layout. The tag word is
   rebuilt from the register contents: 00=valid, 01=zero,
   10=special (NaN/infinity/denormal), 11=empty. */
void helper_fstenv(uint8_t *ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    /* fold the current top-of-stack index into the status word */
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    /* build the 16-bit tag word, two bits per register, reg 0 lowest */
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;   /* empty */
        } else {
            tmp.d = env->fpregs[i];
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0);  /* instruction/operand pointers not tracked */
        stl(ptr + 16, 0);
        stl(ptr + 20, 0);
        stl(ptr + 24, 0);
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}
1389

    
1390
/* FLDENV: reload the FPU environment (control word, status word, tag
   word) from memory at ptr; the 32- and 16-bit layouts differ only in
   field spacing. Register contents are not modified. */
void helper_fldenv(uint8_t *ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    /* extract the top-of-stack index from the status word */
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    /* rebuild the per-register empty flags from the tag word.
       Bug fix: all 8 registers must be processed (the loop previously
       stopped at 7, leaving fptags[7] stale). */
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}
1411

    
1412
/* FSAVE/FNSAVE: store the full FPU state (environment followed by the
   8 data registers as 80-bit reals, 10 bytes each) at ptr, then
   reinitialize the FPU as FNINIT would. */
void helper_fsave(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    /* the environment occupies 14 bytes (16 bit) or 28 bytes (32 bit) */
    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
#ifdef USE_X86LDOUBLE
        /* direct store -- assumes host long double matches the 10-byte
           x87 image at this offset; TODO confirm packing/alignment */
        *(long double *)ptr = tmp;
#else
        helper_fstt(tmp, ptr);
#endif        
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;  /* mark all registers empty */
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}
1443

    
1444
/* FRSTOR: reload the full FPU state saved by FSAVE from ptr
   (environment followed by the 8 registers, 10 bytes each). */
void helper_frstor(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    /* skip past the environment block (14 or 28 bytes) */
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
#ifdef USE_X86LDOUBLE
        /* direct load -- assumes the saved image matches host long
           double layout; TODO confirm */
        tmp = *(long double *)ptr;
#else
        tmp = helper_fldt(ptr);
#endif        
        ST(i) = tmp;
        ptr += 10;
    }
}
1462