Statistics
| Branch: | Revision:

root / helper-i386.c @ a412ac57

History | View | Annotate | Download (37.8 kB)

1
/*
2
 *  i386 helpers
3
 * 
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "exec-i386.h"
21

    
22
/* Parity lookup table: entry i is CC_P when i has an even number of
   set bits (x86 PF is even parity of the result's low byte), else 0. */
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
56

    
57
/* modulo 17 table: rotate count for 16-bit RCL/RCR (16 data bits +
   carry = 17 positions, so the shift count is taken mod 17). */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7, 
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
64

    
65
/* modulo 9 table: rotate count for 8-bit RCL/RCR (8 data bits +
   carry = 9 positions, so the shift count is taken mod 9). */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7, 
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5, 
    6, 7, 8, 0, 1, 2, 3, 4,
};
72

    
73
/* Constants loaded by the FPU "load constant" instructions
   (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T). */
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
83
    
84
/* thread support */

/* Global lock taken/released via cpu_lock()/cpu_unlock() to serialize
   emulated-CPU critical sections between host threads. */
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
87

    
88
/* Acquire the global CPU spinlock. */
void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}
92

    
93
/* Release the global CPU spinlock. */
void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
97

    
98
/* Abort execution of translated code and longjmp back to the main CPU
   loop (does not return).  Guest registers that are mapped to fixed
   host registers (one reg_* macro per mapping) must be written back to
   env first, because longjmp will restore the host registers. */
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
#ifdef reg_EAX
    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_ECX
    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_EBX
    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ESP
    env->regs[R_ESP] = ESP;
#endif
#ifdef reg_EBP
    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESI
    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
    env->regs[R_EDI] = EDI;
#endif
    longjmp(env->jmp_env, 1);
}
128

    
129
/* Read the ring-dpl stack pointer (SS:ESP) from the current TSS.
   The TSS format (16- or 32-bit) is derived from the TR descriptor
   type; raises #TS if the needed slot lies beyond the TSS limit. */
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr, 
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;
    
#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    /* TR must hold a TSS descriptor (available or busy) */
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    /* shift = 0 for a 16-bit TSS, 1 for a 32-bit TSS */
    shift = type >> 3;
    /* each privilege level has a (ESP, SS) pair starting at offset 2/4 */
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw(env->tr.base + index);
        *ss_ptr = lduw(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl(env->tr.base + index);
        *ss_ptr = lduw(env->tr.base + index + 4);
    }
}
163

    
164
/* Fetch the 8-byte descriptor selected by 'selector' from the GDT or
   LDT (TI bit) into its two 32-bit words (*e1_ptr = low, *e2_ptr =
   high).  Returns non-zero if the selector exceeds the table limit. */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl(ptr);
    *e2_ptr = ldl(ptr + 4);
    return 0;
}
184
                                     
185

    
186
/* protected mode interrupt: look up the IDT gate, validate the target
   code segment, optionally switch to the inner-privilege stack from
   the TSS, push the return frame (plus segment registers when leaving
   vm86 mode) and transfer control to the handler. */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                      unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int type, dpl, cpl, selector, ss_dpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
    uint32_t old_cs, old_ss, old_esp, old_eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl(ptr);
    e2 = ldl(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        cpu_abort(env, "task gate not supported");
        break;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (env->eflags & VM_MASK)
        cpl = 3;
    else
        cpl = env->segs[R_CS].selector & 3;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    /* validate the handler's code segment descriptor */
    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege: fetch and validate the new SS:ESP from
           the TSS (all failures report #TS with the SS selector) */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        new_stack = 0;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
    }

    /* shift = 0 for a 16-bit gate frame, 1 for a 32-bit gate frame */
    shift = type >> 3;
    /* only these hardware exceptions push an error code */
    has_error_code = 0;
    if (!is_int) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    /* frame size: EIP+CS+EFLAGS (3 slots) + optional SS:ESP + error
       code, plus the four vm86 segment registers, scaled by shift */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;

    /* XXX: check that enough room is available */
    if (new_stack) {
        old_esp = env->regs[R_ESP];
        old_ss = env->segs[R_SS].selector;
        load_seg(R_SS, ss, env->eip);
    } else {
        old_esp = 0;
        old_ss = 0;
        esp = env->regs[R_ESP];
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    load_seg(R_CS, selector, env->eip);
    env->eip = offset;
    env->regs[R_ESP] = esp - push_size;
    ssp = env->segs[R_SS].base + esp;
    if (shift == 1) {
        int old_eflags;
        if (env->eflags & VM_MASK) {
            /* leaving vm86 mode: save the data segment registers */
            ssp -= 4;
            stl(ssp, env->segs[R_GS].selector);
            ssp -= 4;
            stl(ssp, env->segs[R_FS].selector);
            ssp -= 4;
            stl(ssp, env->segs[R_DS].selector);
            ssp -= 4;
            stl(ssp, env->segs[R_ES].selector);
        }
        if (new_stack) {
            ssp -= 4;
            stl(ssp, old_ss);
            ssp -= 4;
            stl(ssp, old_esp);
        }
        ssp -= 4;
        old_eflags = compute_eflags();
        stl(ssp, old_eflags);
        ssp -= 4;
        stl(ssp, old_cs);
        ssp -= 4;
        stl(ssp, old_eip);
        if (has_error_code) {
            ssp -= 4;
            stl(ssp, error_code);
        }
    } else {
        /* 16-bit gate: same frame with 2-byte slots */
        if (new_stack) {
            ssp -= 2;
            stw(ssp, old_ss);
            ssp -= 2;
            stw(ssp, old_esp);
        }
        ssp -= 2;
        stw(ssp, compute_eflags());
        ssp -= 2;
        stw(ssp, old_cs);
        ssp -= 2;
        stw(ssp, old_eip);
        if (has_error_code) {
            ssp -= 2;
            stw(ssp, error_code);
        }
    }
    
    /* interrupt gates (even types) clear IF; trap gates leave it set */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
363

    
364
/* real mode interrupt: read the 4-byte CS:IP vector from the IVT,
   push FLAGS/CS/IP on the 16-bit stack and jump to the handler. */
static void do_interrupt_real(int intno, int is_int, int error_code,
                                 unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw(ptr);
    selector = lduw(ptr + 2);
    esp = env->regs[R_ESP] & 0xffff;
    ssp = env->segs[R_SS].base + esp;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* push FLAGS, CS, IP (16-bit each) */
    ssp -= 2;
    stw(ssp, compute_eflags());
    ssp -= 2;
    stw(ssp, old_cs);
    ssp -= 2;
    stw(ssp, old_eip);
    esp -= 6;
    
    /* update processor state; only the low 16 bits of ESP change */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (uint8_t *)(selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
403

    
404
/* fake user mode interrupt: used by user-mode emulation, where the
   handler cannot actually be run.  Only the gate DPL check is
   performed; the exception is then reported to the caller. */
void do_interrupt_user(int intno, int is_int, int error_code, 
                       unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl(ptr + 4);
    
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    /* user mode always runs at CPL 3 */
    cpl = 3;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
429

    
430
/*
431
 * Begin excution of an interruption. is_int is TRUE if coming from
432
 * the int instruction. next_eip is the EIP value AFTER the interrupt
433
 * instruction. It is only relevant if is_int is TRUE.  
434
 */
435
void do_interrupt(int intno, int is_int, int error_code, 
436
                  unsigned int next_eip)
437
{
438
    if (env->cr[0] & CR0_PE_MASK) {
439
        do_interrupt_protected(intno, is_int, error_code, next_eip);
440
    } else {
441
        do_interrupt_real(intno, is_int, error_code, next_eip);
442
    }
443
}
444

    
445
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.  
 *
 * Records the pending exception in env and exits the execution loop;
 * does not return (cpu_loop_exit longjmps).
 */
void raise_interrupt(int intno, int is_int, int error_code, 
                     unsigned int next_eip)
{
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = next_eip;
    cpu_loop_exit();
}
460

    
461
/* shortcuts to generate exceptions */

/* Raise a hardware exception carrying an error code; does not return. */
void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
466

    
467
/* Raise a hardware exception without an error code; does not return. */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
471

    
472
#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */

/* Unsigned 64/32 division: quotient stored via q_ptr, remainder
   returned. */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    uint32_t rem = num % den;
    *q_ptr = num / den;
    return rem;
}

/* Signed 64/32 division: quotient stored via q_ptr, remainder
   returned. */
int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
{
    int32_t rem = num % den;
    *q_ptr = num / den;
    return rem;
}
#endif
487

    
488
/* DIV r/m32: unsigned divide EDX:EAX by T0; quotient -> EAX,
   remainder -> EDX.  Raises #DE (at the faulting eip) on division by
   zero or when the quotient does not fit in 32 bits. */
void helper_divl_EAX_T0(uint32_t eip)
{
    unsigned int den, q, r;
    uint64_t num;
    
    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
    /* BUG FIX: x86 DIV raises #DE when the quotient overflows 32 bits;
       the result was previously truncated silently.  For unsigned
       operands the quotient fits iff the high half of the dividend is
       strictly less than the divisor; checking before dividing keeps
       both division paths below unchanged. */
    if ((num >> 32) >= den) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}
508

    
509
/* IDIV r/m32: signed divide EDX:EAX by T0; quotient -> EAX,
   remainder -> EDX.  Raises #DE (at the faulting eip) on division by
   zero.
   NOTE(review): real IDIV also raises #DE when the quotient does not
   fit in 32 bits (e.g. INT64_MIN / -1); this implementation silently
   truncates instead -- confirm against the SDM and add a check. */
void helper_idivl_EAX_T0(uint32_t eip)
{
    int den, q, r;
    int64_t num;
    
    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}
529

    
530
/* CMPXCHG8B: compare the 64-bit value at [A0] with EDX:EAX; if equal,
   store ECX:EBX there and set ZF, otherwise load EDX:EAX from memory
   and clear ZF.  Other flags are preserved via the lazy-flags table. */
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq((uint8_t *)A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
547

    
548
/* We simulate a pre-MMX pentium as in valgrind */
/* Feature bits reported in EDX by CPUID leaf 1 (bit positions follow
   the Intel CPUID.1:EDX layout). */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
/* ... */
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)
569

    
570
/* CPUID: leaf 0 returns the max leaf and the "GenuineIntel" vendor
   string; leaf 1 returns family/model/stepping and the feature bits.
   Other leaf values leave the registers unchanged. */
void helper_cpuid(void)
{
    if (EAX == 0) {
        EAX = 1; /* max EAX index supported */
        EBX = 0x756e6547; /* "Genu" */
        ECX = 0x6c65746e; /* "ntel" */
        EDX = 0x49656e69; /* "ineI" */
    } else if (EAX == 1) {
        int family, model, stepping;
        /* EAX = 1 info */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 1;
        stepping = 3;
#endif
        EAX = (family << 8) | (model << 4) | stepping;
        EBX = 0;
        ECX = 0;
        EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
            CPUID_TSC | CPUID_MSR | CPUID_MCE |
            CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
    }
}
599

    
600
/* Decode a descriptor (e1 = low word, e2 = high word) into a segment
   cache: scattered base bits are reassembled, the 20-bit limit is
   expanded to pages when the granularity bit is set, and the raw high
   word is kept as the flags. */
static inline void load_seg_cache(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = (void *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    sc->limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        sc->limit = (sc->limit << 12) | 0xfff;
    sc->flags = e2;
}
608

    
609
/* LLDT: load the LDT register from the selector in T0.  A null
   selector disables the LDT; otherwise the selector must reference a
   present LDT descriptor (type 2) in the GDT. */
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index;
    uint8_t *ptr;
    
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = NULL;
        env->ldt.limit = 0;
    } else {
        /* the selector must come from the GDT (TI bit clear) */
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl(ptr);
        e2 = ldl(ptr + 4);
        /* must be a system descriptor of type 2 (LDT) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache(&env->ldt, e1, e2);
    }
    env->ldt.selector = selector;
}
640

    
641
/* LTR: load the task register from the selector in T0.  A null
   selector clears TR; otherwise the selector must reference a present
   available TSS descriptor (type 2 = 16-bit, 9 = 32-bit) in the GDT,
   which is then marked busy. */
void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type;
    uint8_t *ptr;
    
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = NULL;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        /* the selector must come from the GDT (TI bit clear) */
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl(ptr);
        e2 = ldl(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) || 
            (type != 2 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache(&env->tr, e1, e2);
        e2 |= 0x00000200; /* set the busy bit */
        stl(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
677

    
678
/* only works if protected mode and not VM86 */
/* Load segment register seg_reg with 'selector', performing the
   protected-mode descriptor checks.  On failure EIP is rewound to
   cur_eip before the exception is raised so the faulting instruction
   can be restarted. */
void load_seg(int seg_reg, int selector, unsigned int cur_eip)
{
    SegmentCache *sc;
    uint32_t e1, e2;
    
    sc = &env->segs[seg_reg];
    if ((selector & 0xfffc) == 0) {
        /* null selector case: allowed for data segments, #GP for SS */
        if (seg_reg == R_SS) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, 0);
        } else {
            /* XXX: each access should trigger an exception */
            sc->base = NULL;
            sc->limit = 0;
            sc->flags = 0;
        }
    } else {
        if (load_segment(&e1, &e2, selector) != 0) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }
        /* must be a non-system segment, and not execute-only code */
        if (!(e2 & DESC_S_MASK) ||
            (e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }

        if (seg_reg == R_SS) {
            /* SS must be a writable data segment */
            if ((e2 & (DESC_CS_MASK | DESC_W_MASK)) == 0) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        } else {
            /* other segments must be readable if they are code */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        /* not-present: #SS for the stack segment, #NP otherwise */
        if (!(e2 & DESC_P_MASK)) {
            EIP = cur_eip;
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }
        load_seg_cache(sc, e1, e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", 
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
    sc->selector = selector;
}
734

    
735
/* protected mode jump */
736
void jmp_seg(int selector, unsigned int new_eip)
737
{
738
    SegmentCache sc1;
739
    uint32_t e1, e2, cpl, dpl, rpl;
740

    
741
    if ((selector & 0xfffc) == 0) {
742
        raise_exception_err(EXCP0D_GPF, 0);
743
    }
744

    
745
    if (load_segment(&e1, &e2, selector) != 0)
746
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
747
    cpl = env->segs[R_CS].selector & 3;
748
    if (e2 & DESC_S_MASK) {
749
        if (!(e2 & DESC_CS_MASK))
750
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
751
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
752
        if (e2 & DESC_CS_MASK) {
753
            /* conforming code segment */
754
            if (dpl > cpl)
755
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
756
        } else {
757
            /* non conforming code segment */
758
            rpl = selector & 3;
759
            if (rpl > cpl)
760
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
761
            if (dpl != cpl)
762
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
763
        }
764
        if (!(e2 & DESC_P_MASK))
765
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
766
        load_seg_cache(&sc1, e1, e2);
767
        if (new_eip > sc1.limit)
768
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
769
        env->segs[R_CS].base = sc1.base;
770
        env->segs[R_CS].limit = sc1.limit;
771
        env->segs[R_CS].flags = sc1.flags;
772
        env->segs[R_CS].selector = (selector & 0xfffc) | cpl;
773
        EIP = new_eip;
774
    } else {
775
        cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x", 
776
                  selector, new_eip);
777
    }
778
}
779

    
780
/* init the segment cache in vm86 mode */
781
static inline void load_seg_vm(int seg, int selector)
782
{
783
    SegmentCache *sc = &env->segs[seg];
784
    selector &= 0xffff;
785
    sc->base = (uint8_t *)(selector << 4);
786
    sc->selector = selector;
787
    sc->flags = 0;
788
    sc->limit = 0xffff;
789
}
790

    
791
/* real mode iret: pop IP, CS and FLAGS from the stack (16- or 32-bit
   frame selected by shift) and restore them. */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp;
    uint8_t *ssp;
    int eflags_mask;
    
    sp = env->regs[R_ESP] & 0xffff;
    ssp = env->segs[R_SS].base + sp;
    if (shift == 1) {
        /* 32 bits */
        new_eflags = ldl(ssp + 8);
        new_cs = ldl(ssp + 4) & 0xffff;
        new_eip = ldl(ssp) & 0xffff;
    } else {
        /* 16 bits */
        new_eflags = lduw(ssp + 4);
        new_cs = lduw(ssp + 2);
        new_eip = lduw(ssp);
    }
    /* only the low 16 bits of ESP change */
    new_esp = sp + (6 << shift);
    env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) | 
        (new_esp & 0xffff);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    eflags_mask = FL_UPDATE_CPL0_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}
821

    
822
/* protected mode iret: pop EIP/CS/EFLAGS (16- or 32-bit frame per
   shift) and return to the same privilege level, to an outer
   privilege level (also popping ESP/SS), or back to vm86 mode (also
   popping ES/DS/FS/GS). */
void helper_iret_protected(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2;
    int cpl, dpl, rpl, eflags_mask;
    uint8_t *ssp;
    
    sp = env->regs[R_ESP];
    if (!(env->segs[R_SS].flags & DESC_B_MASK))
        sp &= 0xffff;
    ssp = env->segs[R_SS].base + sp;
    if (shift == 1) {
        /* 32 bits */
        new_eflags = ldl(ssp + 8);
        new_cs = ldl(ssp + 4) & 0xffff;
        new_eip = ldl(ssp);
        if (new_eflags & VM_MASK)
            goto return_to_vm86;
    } else {
        /* 16 bits */
        new_eflags = lduw(ssp + 4);
        new_cs = lduw(ssp + 2);
        new_eip = lduw(ssp);
    }
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->segs[R_CS].selector & 3;
    rpl = new_cs & 3; 
    /* iret may never raise the privilege level */
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_CS_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
    
    if (rpl == cpl) {
        /* return to same privilege level */
        load_seg(R_CS, new_cs, env->eip);
        new_esp = sp + (6 << shift);
    } else {
        /* return to a different (outer) privilege level: the frame
           also carries the outer stack pointer and segment */
        if (shift == 1) {
            /* 32 bits */
            new_esp = ldl(ssp + 12);
            new_ss = ldl(ssp + 16) & 0xffff;
        } else {
            /* 16 bits */
            new_esp = lduw(ssp + 6);
            new_ss = lduw(ssp + 8);
        }
        
        if ((new_ss & 3) != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (load_segment(&e1, &e2, new_ss) != 0)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        /* the new SS must be a writable data segment */
        if (!(e2 & DESC_S_MASK) ||
            (e2 & DESC_CS_MASK) ||
            !(e2 & DESC_W_MASK))
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);

        load_seg(R_CS, new_cs, env->eip);
        load_seg(R_SS, new_ss, env->eip);
    }
    if (env->segs[R_SS].flags & DESC_B_MASK)
        env->regs[R_ESP] = new_esp;
    else
        env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) | 
            (new_esp & 0xffff);
    env->eip = new_eip;
    /* CPL 0 may update the IOPL/IF bits as well */
    if (cpl == 0)
        eflags_mask = FL_UPDATE_CPL0_MASK;
    else
        eflags_mask = FL_UPDATE_MASK32;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    return;

 return_to_vm86:
    /* full vm86 frame: ESP, SS, ES, DS, FS, GS follow the basic frame */
    new_esp = ldl(ssp + 12);
    new_ss = ldl(ssp + 16);
    new_es = ldl(ssp + 20);
    new_ds = ldl(ssp + 24);
    new_fs = ldl(ssp + 28);
    new_gs = ldl(ssp + 32);
    
    /* modify processor state */
    load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs);
    load_seg_vm(R_SS, new_ss);
    load_seg_vm(R_ES, new_es);
    load_seg_vm(R_DS, new_ds);
    load_seg_vm(R_FS, new_fs);
    load_seg_vm(R_GS, new_gs);
    
    env->eip = new_eip;
    env->regs[R_ESP] = new_esp;
}
938

    
939
void helper_movl_crN_T0(int reg)
940
{
941
    env->cr[reg] = T0;
942
    switch(reg) {
943
    case 0:
944
        cpu_x86_update_cr0(env);
945
        break;
946
    case 3:
947
        cpu_x86_update_cr3(env);
948
        break;
949
    }
950
}
951

    
952
/* XXX: do more */
/* MOV DRn, T0: only stores the value; debug register side effects
   (breakpoints) are not implemented. */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}
957

    
958
/* INVLPG: flush the TLB entry covering addr. */
void helper_invlpg(unsigned int addr)
{
    cpu_x86_flush_tlb(env, addr);
}
962

    
963
/* rdtsc */
#ifndef __i386__
/* fake TSC for non-x86 hosts; incremented on each RDTSC */
uint64_t emu_time;
#endif
967

    
968
/* RDTSC: return the time-stamp counter in EDX:EAX.  On an x86 host
   the real counter is used; elsewhere a monotonic counter is faked. */
void helper_rdtsc(void)
{
    uint64_t val;
#ifdef __i386__
    asm("rdtsc" : "=A" (val));
#else
    /* better than nothing: the time increases */
    val = emu_time++;
#endif
    EAX = val;
    EDX = val >> 32;
}
980

    
981
/* WRMSR: write EDX:EAX to the MSR selected by ECX.  Only the
   SYSENTER MSRs are implemented (and only their low 32 bits are
   used); other MSRs are silently ignored. */
void helper_wrmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = EAX & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = EAX;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = EAX;
        break;
    default:
        /* XXX: exception ? */
        break; 
    }
}
998

    
999
void helper_rdmsr(void)
1000
{
1001
    switch(ECX) {
1002
    case MSR_IA32_SYSENTER_CS:
1003
        EAX = env->sysenter_cs;
1004
        EDX = 0;
1005
        break;
1006
    case MSR_IA32_SYSENTER_ESP:
1007
        EAX = env->sysenter_esp;
1008
        EDX = 0;
1009
        break;
1010
    case MSR_IA32_SYSENTER_EIP:
1011
        EAX = env->sysenter_eip;
1012
        EDX = 0;
1013
        break;
1014
    default:
1015
        /* XXX: exception ? */
1016
        break; 
1017
    }
1018
}
1019

    
1020
void helper_lsl(void)
1021
{
1022
    unsigned int selector, limit;
1023
    uint32_t e1, e2;
1024

    
1025
    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1026
    selector = T0 & 0xffff;
1027
    if (load_segment(&e1, &e2, selector) != 0)
1028
        return;
1029
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1030
    if (e2 & (1 << 23))
1031
        limit = (limit << 12) | 0xfff;
1032
    T1 = limit;
1033
    CC_SRC |= CC_Z;
1034
}
1035

    
1036
void helper_lar(void)
1037
{
1038
    unsigned int selector;
1039
    uint32_t e1, e2;
1040

    
1041
    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1042
    selector = T0 & 0xffff;
1043
    if (load_segment(&e1, &e2, selector) != 0)
1044
        return;
1045
    T1 = e2 & 0x00f0ff00;
1046
    CC_SRC |= CC_Z;
1047
}
1048

    
1049
/* FPU helpers */
1050

    
1051
#ifndef USE_X86LDOUBLE
1052
void helper_fldt_ST0_A0(void)
1053
{
1054
    int new_fpstt;
1055
    new_fpstt = (env->fpstt - 1) & 7;
1056
    env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
1057
    env->fpstt = new_fpstt;
1058
    env->fptags[new_fpstt] = 0; /* validate stack entry */
1059
}
1060

    
1061
void helper_fstt_ST0_A0(void)
1062
{
1063
    helper_fstt(ST0, (uint8_t *)A0);
1064
}
1065
#endif
1066

    
1067
/* BCD ops */

/* 10*x computed as x + x + 8*x, avoiding a multiply.  The argument
   is fully parenthesized so expressions such as MUL10(a ? b : c)
   expand correctly; note it is still evaluated three times, so it
   must be free of side effects. */
#define MUL10(iv) ((iv) + (iv) + ((iv) << 3))
1070

    
1071
void helper_fbld_ST0_A0(void)
1072
{
1073
    CPU86_LDouble tmp;
1074
    uint64_t val;
1075
    unsigned int v;
1076
    int i;
1077

    
1078
    val = 0;
1079
    for(i = 8; i >= 0; i--) {
1080
        v = ldub((uint8_t *)A0 + i);
1081
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
1082
    }
1083
    tmp = val;
1084
    if (ldub((uint8_t *)A0 + 9) & 0x80)
1085
        tmp = -tmp;
1086
    fpush();
1087
    ST0 = tmp;
1088
}
1089

    
1090
void helper_fbst_ST0_A0(void)
1091
{
1092
    CPU86_LDouble tmp;
1093
    int v;
1094
    uint8_t *mem_ref, *mem_end;
1095
    int64_t val;
1096

    
1097
    tmp = rint(ST0);
1098
    val = (int64_t)tmp;
1099
    mem_ref = (uint8_t *)A0;
1100
    mem_end = mem_ref + 9;
1101
    if (val < 0) {
1102
        stb(mem_end, 0x80);
1103
        val = -val;
1104
    } else {
1105
        stb(mem_end, 0x00);
1106
    }
1107
    while (mem_ref < mem_end) {
1108
        if (val == 0)
1109
            break;
1110
        v = val % 100;
1111
        val = val / 100;
1112
        v = ((v / 10) << 4) | (v % 10);
1113
        stb(mem_ref++, v);
1114
    }
1115
    while (mem_ref < mem_end) {
1116
        stb(mem_ref++, 0);
1117
    }
1118
}
1119

    
1120
void helper_f2xm1(void)
1121
{
1122
    ST0 = pow(2.0,ST0) - 1.0;
1123
}
1124

    
1125
void helper_fyl2x(void)
1126
{
1127
    CPU86_LDouble fptemp;
1128
    
1129
    fptemp = ST0;
1130
    if (fptemp>0.0){
1131
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
1132
        ST1 *= fptemp;
1133
        fpop();
1134
    } else { 
1135
        env->fpus &= (~0x4700);
1136
        env->fpus |= 0x400;
1137
    }
1138
}
1139

    
1140
void helper_fptan(void)
1141
{
1142
    CPU86_LDouble fptemp;
1143

    
1144
    fptemp = ST0;
1145
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1146
        env->fpus |= 0x400;
1147
    } else {
1148
        ST0 = tan(fptemp);
1149
        fpush();
1150
        ST0 = 1.0;
1151
        env->fpus &= (~0x400);  /* C2 <-- 0 */
1152
        /* the above code is for  |arg| < 2**52 only */
1153
    }
1154
}
1155

    
1156
void helper_fpatan(void)
1157
{
1158
    CPU86_LDouble fptemp, fpsrcop;
1159

    
1160
    fpsrcop = ST1;
1161
    fptemp = ST0;
1162
    ST1 = atan2(fpsrcop,fptemp);
1163
    fpop();
1164
}
1165

    
1166
void helper_fxtract(void)
1167
{
1168
    CPU86_LDoubleU temp;
1169
    unsigned int expdif;
1170

    
1171
    temp.d = ST0;
1172
    expdif = EXPD(temp) - EXPBIAS;
1173
    /*DP exponent bias*/
1174
    ST0 = expdif;
1175
    fpush();
1176
    BIASEXPONENT(temp);
1177
    ST0 = temp.d;
1178
}
1179

    
1180
void helper_fprem1(void)
1181
{
1182
    CPU86_LDouble dblq, fpsrcop, fptemp;
1183
    CPU86_LDoubleU fpsrcop1, fptemp1;
1184
    int expdif;
1185
    int q;
1186

    
1187
    fpsrcop = ST0;
1188
    fptemp = ST1;
1189
    fpsrcop1.d = fpsrcop;
1190
    fptemp1.d = fptemp;
1191
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
1192
    if (expdif < 53) {
1193
        dblq = fpsrcop / fptemp;
1194
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
1195
        ST0 = fpsrcop - fptemp*dblq;
1196
        q = (int)dblq; /* cutting off top bits is assumed here */
1197
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
1198
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
1199
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
1200
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
1201
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
1202
    } else {
1203
        env->fpus |= 0x400;  /* C2 <-- 1 */
1204
        fptemp = pow(2.0, expdif-50);
1205
        fpsrcop = (ST0 / ST1) / fptemp;
1206
        /* fpsrcop = integer obtained by rounding to the nearest */
1207
        fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
1208
            floor(fpsrcop): ceil(fpsrcop);
1209
        ST0 -= (ST1 * fpsrcop * fptemp);
1210
    }
1211
}
1212

    
1213
void helper_fprem(void)
1214
{
1215
    CPU86_LDouble dblq, fpsrcop, fptemp;
1216
    CPU86_LDoubleU fpsrcop1, fptemp1;
1217
    int expdif;
1218
    int q;
1219
    
1220
    fpsrcop = ST0;
1221
    fptemp = ST1;
1222
    fpsrcop1.d = fpsrcop;
1223
    fptemp1.d = fptemp;
1224
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
1225
    if ( expdif < 53 ) {
1226
        dblq = fpsrcop / fptemp;
1227
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
1228
        ST0 = fpsrcop - fptemp*dblq;
1229
        q = (int)dblq; /* cutting off top bits is assumed here */
1230
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
1231
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
1232
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
1233
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
1234
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
1235
    } else {
1236
        env->fpus |= 0x400;  /* C2 <-- 1 */
1237
        fptemp = pow(2.0, expdif-50);
1238
        fpsrcop = (ST0 / ST1) / fptemp;
1239
        /* fpsrcop = integer obtained by chopping */
1240
        fpsrcop = (fpsrcop < 0.0)?
1241
            -(floor(fabs(fpsrcop))): floor(fpsrcop);
1242
        ST0 -= (ST1 * fpsrcop * fptemp);
1243
    }
1244
}
1245

    
1246
void helper_fyl2xp1(void)
1247
{
1248
    CPU86_LDouble fptemp;
1249

    
1250
    fptemp = ST0;
1251
    if ((fptemp+1.0)>0.0) {
1252
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
1253
        ST1 *= fptemp;
1254
        fpop();
1255
    } else { 
1256
        env->fpus &= (~0x4700);
1257
        env->fpus |= 0x400;
1258
    }
1259
}
1260

    
1261
void helper_fsqrt(void)
1262
{
1263
    CPU86_LDouble fptemp;
1264

    
1265
    fptemp = ST0;
1266
    if (fptemp<0.0) { 
1267
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
1268
        env->fpus |= 0x400;
1269
    }
1270
    ST0 = sqrt(fptemp);
1271
}
1272

    
1273
void helper_fsincos(void)
1274
{
1275
    CPU86_LDouble fptemp;
1276

    
1277
    fptemp = ST0;
1278
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1279
        env->fpus |= 0x400;
1280
    } else {
1281
        ST0 = sin(fptemp);
1282
        fpush();
1283
        ST0 = cos(fptemp);
1284
        env->fpus &= (~0x400);  /* C2 <-- 0 */
1285
        /* the above code is for  |arg| < 2**63 only */
1286
    }
1287
}
1288

    
1289
void helper_frndint(void)
1290
{
1291
    CPU86_LDouble a;
1292

    
1293
    a = ST0;
1294
#ifdef __arm__
1295
    switch(env->fpuc & RC_MASK) {
1296
    default:
1297
    case RC_NEAR:
1298
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
1299
        break;
1300
    case RC_DOWN:
1301
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
1302
        break;
1303
    case RC_UP:
1304
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
1305
        break;
1306
    case RC_CHOP:
1307
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));
1308
        break;
1309
    }
1310
#else
1311
    a = rint(a);
1312
#endif
1313
    ST0 = a;
1314
}
1315

    
1316
void helper_fscale(void)
1317
{
1318
    CPU86_LDouble fpsrcop, fptemp;
1319

    
1320
    fpsrcop = 2.0;
1321
    fptemp = pow(fpsrcop,ST1);
1322
    ST0 *= fptemp;
1323
}
1324

    
1325
void helper_fsin(void)
1326
{
1327
    CPU86_LDouble fptemp;
1328

    
1329
    fptemp = ST0;
1330
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1331
        env->fpus |= 0x400;
1332
    } else {
1333
        ST0 = sin(fptemp);
1334
        env->fpus &= (~0x400);  /* C2 <-- 0 */
1335
        /* the above code is for  |arg| < 2**53 only */
1336
    }
1337
}
1338

    
1339
void helper_fcos(void)
1340
{
1341
    CPU86_LDouble fptemp;
1342

    
1343
    fptemp = ST0;
1344
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1345
        env->fpus |= 0x400;
1346
    } else {
1347
        ST0 = cos(fptemp);
1348
        env->fpus &= (~0x400);  /* C2 <-- 0 */
1349
        /* the above code is for  |arg5 < 2**63 only */
1350
    }
1351
}
1352

    
1353
void helper_fxam_ST0(void)
1354
{
1355
    CPU86_LDoubleU temp;
1356
    int expdif;
1357

    
1358
    temp.d = ST0;
1359

    
1360
    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
1361
    if (SIGND(temp))
1362
        env->fpus |= 0x200; /* C1 <-- 1 */
1363

    
1364
    expdif = EXPD(temp);
1365
    if (expdif == MAXEXPD) {
1366
        if (MANTD(temp) == 0)
1367
            env->fpus |=  0x500 /*Infinity*/;
1368
        else
1369
            env->fpus |=  0x100 /*NaN*/;
1370
    } else if (expdif == 0) {
1371
        if (MANTD(temp) == 0)
1372
            env->fpus |=  0x4000 /*Zero*/;
1373
        else
1374
            env->fpus |= 0x4400 /*Denormal*/;
1375
    } else {
1376
        env->fpus |= 0x400;
1377
    }
1378
}
1379

    
1380
void helper_fstenv(uint8_t *ptr, int data32)
1381
{
1382
    int fpus, fptag, exp, i;
1383
    uint64_t mant;
1384
    CPU86_LDoubleU tmp;
1385

    
1386
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
1387
    fptag = 0;
1388
    for (i=7; i>=0; i--) {
1389
        fptag <<= 2;
1390
        if (env->fptags[i]) {
1391
            fptag |= 3;
1392
        } else {
1393
            tmp.d = env->fpregs[i];
1394
            exp = EXPD(tmp);
1395
            mant = MANTD(tmp);
1396
            if (exp == 0 && mant == 0) {
1397
                /* zero */
1398
                fptag |= 1;
1399
            } else if (exp == 0 || exp == MAXEXPD
1400
#ifdef USE_X86LDOUBLE
1401
                       || (mant & (1LL << 63)) == 0
1402
#endif
1403
                       ) {
1404
                /* NaNs, infinity, denormal */
1405
                fptag |= 2;
1406
            }
1407
        }
1408
    }
1409
    if (data32) {
1410
        /* 32 bit */
1411
        stl(ptr, env->fpuc);
1412
        stl(ptr + 4, fpus);
1413
        stl(ptr + 8, fptag);
1414
        stl(ptr + 12, 0);
1415
        stl(ptr + 16, 0);
1416
        stl(ptr + 20, 0);
1417
        stl(ptr + 24, 0);
1418
    } else {
1419
        /* 16 bit */
1420
        stw(ptr, env->fpuc);
1421
        stw(ptr + 2, fpus);
1422
        stw(ptr + 4, fptag);
1423
        stw(ptr + 6, 0);
1424
        stw(ptr + 8, 0);
1425
        stw(ptr + 10, 0);
1426
        stw(ptr + 12, 0);
1427
    }
1428
}
1429

    
1430
void helper_fldenv(uint8_t *ptr, int data32)
1431
{
1432
    int i, fpus, fptag;
1433

    
1434
    if (data32) {
1435
        env->fpuc = lduw(ptr);
1436
        fpus = lduw(ptr + 4);
1437
        fptag = lduw(ptr + 8);
1438
    }
1439
    else {
1440
        env->fpuc = lduw(ptr);
1441
        fpus = lduw(ptr + 2);
1442
        fptag = lduw(ptr + 4);
1443
    }
1444
    env->fpstt = (fpus >> 11) & 7;
1445
    env->fpus = fpus & ~0x3800;
1446
    for(i = 0;i < 7; i++) {
1447
        env->fptags[i] = ((fptag & 3) == 3);
1448
        fptag >>= 2;
1449
    }
1450
}
1451

    
1452
void helper_fsave(uint8_t *ptr, int data32)
1453
{
1454
    CPU86_LDouble tmp;
1455
    int i;
1456

    
1457
    helper_fstenv(ptr, data32);
1458

    
1459
    ptr += (14 << data32);
1460
    for(i = 0;i < 8; i++) {
1461
        tmp = ST(i);
1462
#ifdef USE_X86LDOUBLE
1463
        *(long double *)ptr = tmp;
1464
#else
1465
        helper_fstt(tmp, ptr);
1466
#endif        
1467
        ptr += 10;
1468
    }
1469

    
1470
    /* fninit */
1471
    env->fpus = 0;
1472
    env->fpstt = 0;
1473
    env->fpuc = 0x37f;
1474
    env->fptags[0] = 1;
1475
    env->fptags[1] = 1;
1476
    env->fptags[2] = 1;
1477
    env->fptags[3] = 1;
1478
    env->fptags[4] = 1;
1479
    env->fptags[5] = 1;
1480
    env->fptags[6] = 1;
1481
    env->fptags[7] = 1;
1482
}
1483

    
1484
void helper_frstor(uint8_t *ptr, int data32)
1485
{
1486
    CPU86_LDouble tmp;
1487
    int i;
1488

    
1489
    helper_fldenv(ptr, data32);
1490
    ptr += (14 << data32);
1491

    
1492
    for(i = 0;i < 8; i++) {
1493
#ifdef USE_X86LDOUBLE
1494
        tmp = *(long double *)ptr;
1495
#else
1496
        tmp = helper_fldt(ptr);
1497
#endif        
1498
        ST(i) = tmp;
1499
        ptr += 10;
1500
    }
1501
}
1502