
root / target-i386 / helper.c @ 4796f5e9


/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"

//#define DEBUG_PCALL

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
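
/* The table above can be regenerated with the sketch below: PF is set
   when the low byte of the result contains an even number of set
   bits.  (Stand-alone illustration, not built as part of QEMU.) */
#if 0
#include <stdio.h>

int main(void)
{
    int i, j, n;

    for(i = 0; i < 256; i++) {
        /* count the set bits of the byte value */
        n = 0;
        for(j = 0; j < 8; j++)
            n += (i >> j) & 1;
        printf("    %s,", (n & 1) == 0 ? "CC_P" : "0");
        if ((i & 7) == 7)
            printf("\n");
    }
    return 0;
}
#endif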

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
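
/* Note on the two tables above: RCL rotates through CF, so a 16 bit
   operand effectively has 17 bits and an 8 bit operand 9 bits; the
   tables reduce a 5 bit rotate count modulo 17 and modulo 9. */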

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand here because
       longjmp restores them */
#ifdef reg_EAX
    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_ECX
    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_EBX
    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ESP
    env->regs[R_ESP] = ESP;
#endif
#ifdef reg_EBP
    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESI
    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
    env->regs[R_EDI] = EDI;
#endif
    longjmp(env->jmp_env, 1);
}

/* return non-zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
{
    return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
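
/* A descriptor is two 32 bit words: e1 holds limit[15:0] and
   base[15:0], e2 holds base[23:16], the type and flag bits,
   limit[19:16] and base[31:24].  Example: the flat descriptor
   e1=0x0000ffff, e2=0x00cf9a00 decodes to base=0 and, since
   DESC_G_MASK is set, to an expanded limit of 0xffffffff. */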

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (uint8_t *)(selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
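
/* Note: for a 32 bit TSS (shift == 1) the ss:esp pair for privilege
   level dpl lives at offset dpl * 8 + 4 (ESP0 at 0x04, SS0 at 0x08,
   and so on); for a 16 bit TSS (shift == 0) at offset dpl * 4 + 2. */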

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* a code segment must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
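
/* 32 bit TSS fields read and written below (16 bit TSS offsets differ):
   0x1c CR3, 0x20 EIP, 0x24 EFLAGS, 0x28-0x44 general registers,
   0x48-0x5c segment selectors, 0x60 LDT selector, 0x64 trap flag,
   0x66 I/O map base (see check_io() further down). */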

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    uint8_t *tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector << 3);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS (its layout is given by
       old_type, not by the new TSS type) */
    if (old_type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, env->eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        for(i = 0; i < 8; i++)
            stl_kernel(env->tr.base + (0x28 + i * 4), env->regs[i]);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, env->eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        for(i = 0; i < 8; i++)
            stw_kernel(env->tr.base + (0x12 + i * 2), env->regs[i]);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector << 3);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        env->cr[3] = new_cr3;
        cpu_x86_update_cr3(env);
    }

    /* load the registers that cannot fault first, then reload the
       ones that may trigger exceptions */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    for(i = 0; i < 8; i++)
        env->regs[i] = new_regs[i];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger
           exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], NULL, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = NULL;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    dt = &env->gdt;
    index = new_ldt & ~7;
    if ((index + 7) > dt->limit)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
    ptr = dt->base + index;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
    load_seg_cache_raw_dt(&env->ldt, e1, e2);

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
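
/* Example: a one byte access to port 0x3fa reads the bitmap word at
   io_offset + (0x3fa >> 3) = io_offset + 0x7f, shifts it right by
   0x3fa & 7 = 2 and tests mask (1 << 1) - 1 = 1, i.e. the single
   permission bit of that port. */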

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
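
/* Usage sketch (mirrors do_interrupt_real() below): push CS:IP on a
   16 bit stack.  The stack pointer is updated in a local copy and only
   masked when addressing memory, so SP wraps inside the 64 KB stack
   segment while the high bits of ESP are preserved by the caller. */
#if 0
{
    uint32_t esp = ESP;
    uint8_t *ssp = env->segs[R_SS].base;

    PUSHW(ssp, esp, 0xffff, env->segs[R_CS].selector);
    PUSHW(ssp, esp, 0xffff, env->eip);
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
}
#endif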

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int type, dpl, selector, ss_dpl, cpl, sp_mask;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL);
        if (has_error_code) {
            int mask;
            /* push the error code */
            shift = (env->segs[R_CS].flags >> DESC_B_SHIFT) & 1;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            env->regs[R_ESP] = (esp & mask) | (env->regs[R_ESP] & ~mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege level */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege level */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = NULL; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    
698
    shift = type >> 3;
699

    
700
#if 0
701
    /* XXX: check that enough room is available */
702
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
703
    if (env->eflags & VM_MASK)
704
        push_size += 8;
705
    push_size <<= shift;
706
#endif
707
    if (is_int)
708
        old_eip = next_eip;
709
    else
710
        old_eip = env->eip;
711
    if (shift == 1) {
712
        if (new_stack) {
713
            if (env->eflags & VM_MASK) {
714
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
715
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
716
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
717
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
718
            }
719
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
720
            PUSHL(ssp, esp, sp_mask, ESP);
721
        }
722
        PUSHL(ssp, esp, sp_mask, compute_eflags());
723
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
724
        PUSHL(ssp, esp, sp_mask, old_eip);
725
        if (has_error_code) {
726
            PUSHL(ssp, esp, sp_mask, error_code);
727
        }
728
    } else {
729
        if (new_stack) {
730
            if (env->eflags & VM_MASK) {
731
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
732
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
733
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
734
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
735
            }
736
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
737
            PUSHW(ssp, esp, sp_mask, ESP);
738
        }
739
        PUSHW(ssp, esp, sp_mask, compute_eflags());
740
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
741
        PUSHW(ssp, esp, sp_mask, old_eip);
742
        if (has_error_code) {
743
            PUSHW(ssp, esp, sp_mask, error_code);
744
        }
745
    }
746
    
747
    if (new_stack) {
748
        if (env->eflags & VM_MASK) {
749
            /* XXX: explain me why W2K hangs if the whole segment cache is
750
               reset ? */
751
            env->segs[R_ES].selector = 0;
752
            env->segs[R_ES].flags = 0;
753
            env->segs[R_DS].selector = 0;
754
            env->segs[R_DS].flags = 0;
755
            env->segs[R_FS].selector = 0;
756
            env->segs[R_FS].flags = 0;
757
            env->segs[R_GS].selector = 0;
758
            env->segs[R_GS].flags = 0;
759
        }
760
        ss = (ss & ~3) | dpl;
761
        cpu_x86_load_seg_cache(env, R_SS, ss, 
762
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
763
    }
764
    ESP = (ESP & ~sp_mask) | (esp & sp_mask);
765

    
766
    selector = (selector & ~3) | dpl;
767
    cpu_x86_load_seg_cache(env, R_CS, selector, 
768
                   get_seg_base(e1, e2),
769
                   get_seg_limit(e1, e2),
770
                   e2);
771
    cpu_x86_set_cpl(env, dpl);
772
    env->eip = offset;
773

    
774
    /* interrupt gate clear IF mask */
775
    if ((type & 1) == 0) {
776
        env->eflags &= ~IF_MASK;
777
    }
778
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
779
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (uint8_t *)(selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  unsigned int next_eip, int is_hw)
{
#if 0
    {
        extern FILE *stdout;
        static int count;
        if (env->cr[0] & CR0_PE_MASK) {
            fprintf(stdout, "%d: interrupt: vector=%02x error_code=%04x int=%d CPL=%d CS:EIP=%04x:%08x SS:ESP=%04x:%08x EAX=%08x\n",
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    env->segs[R_SS].selector, ESP,
                    EAX);
            if (0) {
                cpu_x86_dump_state(env, stdout, X86_DUMP_CCOP);
#if 0
                {
                    int i;
                    uint8_t *ptr;
                    fprintf(stdout, "       code=");
                    ptr = env->segs[R_CS].base + env->eip;
                    for(i = 0; i < 16; i++) {
                        fprintf(stdout, " %02x", ldub(ptr + i));
                    }
                    fprintf(stdout, "\n");
                }
#endif
            }
            count++;
        }
    }
#endif

#ifdef DEBUG_PCALL
    if (loglevel) {
        static int count;
        fprintf(logfile, "%d: interrupt: vector=%02x error_code=%04x int=%d\n",
                count, intno, error_code, is_int);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
#if 0
        {
            int i;
            uint8_t *ptr;
            fprintf(logfile, "       code=");
            ptr = env->segs[R_CS].base + env->eip;
            for(i = 0; i < 16; i++) {
                fprintf(logfile, " %02x", ldub(ptr + i));
            }
            fprintf(logfile, "\n");
        }
#endif
        count++;
    }
#endif
    if (env->cr[0] & CR0_PE_MASK) {
        do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     unsigned int next_eip)
{
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = next_eip;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */
void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

void helper_divl_EAX_T0(uint32_t eip)
{
    unsigned int den, q, r;
    uint64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}

void helper_idivl_EAX_T0(uint32_t eip)
{
    int den, q, r;
    int64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}
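
/* CMPXCHG8B: compare EDX:EAX with the 64 bit memory operand; if equal,
   store ECX:EBX there and set ZF, otherwise load the operand into
   EDX:EAX and clear ZF. */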

void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq((uint8_t *)A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
/* ... */
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)
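
/* Note: the EAX=0 leaf below returns the vendor string "GenuineIntel":
   "Genu" in EBX, "ineI" in EDX, "ntel" in ECX. */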

void helper_cpuid(void)
{
    switch(EAX) {
    case 0:
        EAX = 2; /* max EAX index supported */
        EBX = 0x756e6547;
        ECX = 0x6c65746e;
        EDX = 0x49656e69;
        break;
    case 1:
        {
            int family, model, stepping;
            /* EAX = 1 info */
#if 0
            /* pentium 75-200 */
            family = 5;
            model = 2;
            stepping = 11;
#else
            /* pentium pro */
            family = 6;
            model = 1;
            stepping = 3;
#endif
            EAX = (family << 8) | (model << 4) | stepping;
            EBX = 0;
            ECX = 0;
            EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
                CPUID_TSC | CPUID_MSR | CPUID_MCE |
                CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
        }
        break;
    default:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 0x410601;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}

void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = NULL;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }
    env->ldt.selector = selector;
}

void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = NULL;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->tr, e1, e2);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS)
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
    } else {
        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected_T0_T1(void)
{
    int new_cs, new_eip, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    uint8_t *ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    ESP = (ESP & ~esp_mask) | (esp & esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
}
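
/* For a call gate to an inner privilege level, the protected mode
   lcall below switches to the stack given by the TSS, pushes the old
   SS:ESP there, copies param_count words of arguments from the old
   stack, then pushes the return CS:IP. */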

/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    uint8_t *ssp, *old_ssp;

    new_cs = T0;
    new_eip = T1;
#ifdef DEBUG_PCALL
    if (loglevel) {
        fprintf(logfile, "lcall %04x:%08x\n",
                new_cs, new_eip);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

        sp = ESP;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* from this point, not restartable */
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege level */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel)
                fprintf(logfile, "ss=%04x sp=%04x param_count=%d ESP=%x\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege level */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        EIP = offset;
    }
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    uint8_t *ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non-conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, NULL, 0, 0);
        }
    }
}
1576

    
1577
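/* NOTE: for reference, the stack image consumed here is, from the
   top of stack: new EIP, new CS, then for iret the new EFLAGS and,
   when returning to an outer privilege level, new ESP and new SS.
   An iret going back to vm86 mode additionally pops ES, DS, FS and
   GS. */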
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss, sp_mask;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    uint8_t *ssp;

    sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel) {
        fprintf(logfile, "lret new %04x:%08x addend=0x%x\n",
                new_cs, new_eip, addend);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }

        if ((new_ss & 3) != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_load_seg_cache(env, R_SS, new_ss,
                       get_seg_base(ss_e1, ss_e2),
                       get_seg_limit(ss_e1, ss_e2),
                       ss_e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
        /* XXX: change sp_mask according to old segment ? */

        /* validate data segments against the new CPL ('rpl'), not
           the old one */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip;
    ESP = new_esp;
}

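/* NOTE: when the NT flag is set, iret is a task return: the
   selector of the previous task is read from the back link field
   (offset 0) of the current TSS and a task switch back to that
   task is performed. */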
void helper_iret_protected(int shift)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}

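/* NOTE: a mov to CR0 can toggle mode bits such as PE and PG, and a
   mov to CR3 changes the page table base, so each write is
   followed by the corresponding cpu_x86_update_crN() call, which
   is expected to recompute the cached translation state and flush
   the TLB as needed. */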
void helper_movl_crN_T0(int reg)
{
    env->cr[reg] = T0;
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env);
        break;
    case 3:
        cpu_x86_update_cr3(env);
        break;
    }
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(unsigned int addr)
{
    cpu_x86_flush_tlb(env, addr);
}

/* rdtsc */
#ifndef __i386__
uint64_t emu_time;
#endif

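/* NOTE: on an x86 host the "=A" asm constraint returns the 64 bit
   result directly in the EDX:EAX pair, matching the guest rdtsc
   convention; on other hosts a simple counter at least guarantees
   monotonically increasing values. */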
void helper_rdtsc(void)
{
    uint64_t val;
#ifdef __i386__
    asm("rdtsc" : "=A" (val));
#else
    /* better than nothing: the time increases */
    val = emu_time++;
#endif
    EAX = val;
    EDX = val >> 32;
}

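/* NOTE: wrmsr/rdmsr address the MSR with ECX and transfer the 64
   bit value in EDX:EAX; only the sysenter MSRs are implemented
   here, so the high half is ignored on write and reads back as
   zero. */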
void helper_wrmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = EAX & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = EAX;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = EAX;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        EAX = env->sysenter_cs;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_ESP:
        EAX = env->sysenter_esp;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_EIP:
        EAX = env->sysenter_eip;
        EDX = 0;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

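/* NOTE: lsl, lar, verr and verw do not fault on an invalid
   selector: they report success through ZF. CC_SRC is first loaded
   with the current flags with CC_Z cleared, and CC_Z is or'ed back
   in only when all the descriptor checks pass. */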
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC |= CC_Z;
}

void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC |= CC_Z;
}

void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            return;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
    }
    CC_SRC |= CC_Z;
}

void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        /* code segments are never writable */
        return;
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
        if (!(e2 & DESC_W_MASK))
            return;
    }
    CC_SRC |= CC_Z;
}

/* FPU helpers */

void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, (uint8_t *)A0);
}

/* BCD ops */

#define MUL10(iv) ( iv + iv + (iv << 3) )

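/* NOTE: fbld/fbst use the 80 bit packed BCD format: bytes 0 to 8
   hold 18 BCD digits, two per byte with the low order digit in the
   low nibble, and bit 7 of byte 9 holds the sign. */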
void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub((uint8_t *)A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub((uint8_t *)A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    CPU86_LDouble tmp;
    int v;
    uint8_t *mem_ref, *mem_end;
    int64_t val;

    tmp = rint(ST0);
    val = (int64_t)tmp;
    mem_ref = (uint8_t *)A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);       /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

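/* NOTE: fprem and fprem1 report the three low bits of the quotient
   in the FPU status word: C0 is bit 8, C1 is bit 9, C2 is bit 10
   and C3 is bit 14, hence the shifts applied to q below. A set C2
   means the reduction was only partial and the instruction must be
   executed again. */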
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
            floor(fpsrcop): ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0)?
            -(floor(fabs(fpsrcop))): floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    CPU86_LDouble a;

    a = ST0;
#ifdef __arm__
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_DOWN:
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_UP:
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_CHOP:
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));
        break;
    }
#else
    a = rint(a);
#endif
    ST0 = a;
}

void helper_fscale(void)
{
    CPU86_LDouble fpsrcop, fptemp;

    fpsrcop = 2.0;
    /* XXX: ST1 should first be rounded toward zero for an exact fscale */
    fptemp = pow(fpsrcop, ST1);
    ST0 *= fptemp;
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

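/* NOTE: fxam reports the class of ST0 in (C3,C2,C0): normal is C2,
   infinity C2|C0, NaN C0, zero C3 and denormal C3|C2; C1 receives
   the sign bit. */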
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x500 /* Infinity */;
        else
            env->fpus |= 0x100 /* NaN */;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /* Zero */;
        else
            env->fpus |= 0x4400 /* Denormal */;
    } else {
        env->fpus |= 0x400;
    }
}

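/* NOTE: the tag word uses two bits per register: 00 = valid, 01 =
   zero, 10 = special (NaN, infinity, denormal) and 11 = empty.
   Only an empty/non empty flag is kept per register internally, so
   the finer encoding is recomputed here from the register
   contents. */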
void helper_fstenv(uint8_t *ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i];
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0);
        stl(ptr + 16, 0);
        stl(ptr + 20, 0);
        stl(ptr + 24, 0);
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(uint8_t *ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0; i < 8; i++) {
        /* restore all 8 tags; only the empty bit is kept */
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

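/* NOTE: the fsave image is the fstenv environment (14 bytes in 16
   bit mode, 28 bytes in 32 bit mode, hence the '14 << data32'
   below) immediately followed by the 8 stack registers in 10 byte
   extended real format; fsave then reinitializes the FPU as fninit
   does. */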
void helper_fsave(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

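/* NOTE: the softmmu template is instantiated once per access size:
   SHIFT 0, 1, 2 and 3 generate the byte, word, longword and quad
   load/store functions respectively. */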
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    }
    env = saved_env;
}