target-i386/helper.c @ 4136f33c
/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"

//#define DEBUG_PCALL

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
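
/* Illustrative sketch (not in the original source): parity_table[b] is CC_P
   exactly when byte b has an even number of set bits, matching the x86 PF
   definition. A generator that would reproduce the table, assuming only
   standard C and the CC_P flag value used above: */
#if 0
static void gen_parity_table(uint8_t *table)
{
    int b, i, ones;
    for (b = 0; b < 256; b++) {
        ones = 0;
        for (i = 0; i < 8; i++)
            ones += (b >> i) & 1;
        /* even population count -> parity flag set */
        table[b] = (ones & 1) ? 0 : CC_P;
    }
}
#endif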

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
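
/* Illustrative note (not in the original source): RCL rotates through CF, so
   a w-bit operand plus the carry form a (w+1)-bit ring and the shift count is
   reduced modulo w+1 (17 for 16-bit ops, 9 for 8-bit ops); these tables
   precompute that reduction for the hardware-masked 5-bit count. A sketch of
   the lookup, with hypothetical helper name: */
#if 0
static int effective_rcl_count(int count, int is_byte_op)
{
    count &= 0x1f;                /* the CPU masks the count to 5 bits */
    return is_byte_op ? rclb_table[count] : rclw_table[count];
}
#endif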

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
#ifdef reg_EAX
    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_ECX
    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_EBX
    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ESP
    env->regs[R_ESP] = ESP;
#endif
#ifdef reg_EBP
    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESI
    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
    env->regs[R_EDI] = EDI;
#endif
    longjmp(env->jmp_env, 1);
}
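
/* Illustrative sketch (not in this file): the matching setjmp lives in the
   main execution loop, so cpu_loop_exit() unwinds straight back there. A
   minimal sketch of that pattern, with hypothetical run_translated_code()
   and handle_exception() standing in for the real loop body: */
#if 0
for (;;) {
    if (setjmp(env->jmp_env) == 0) {
        /* translate and run guest code; helpers may call cpu_loop_exit() */
        run_translated_code(env);
    } else {
        /* longjmp target: env->exception_index says why execution stopped */
        handle_exception(env);
    }
}
#endif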

/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
{
    return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
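
/* Worked example (not in the original source): for the usual flat 4 GB data
   descriptor e1=0x0000ffff, e2=0x00cf9300, get_seg_base() picks base bits
   31..24 and 23..16 from e2 and bits 15..0 from e1 (base = 0), while
   get_seg_limit() combines limit bits 19..16 from e2 with 15..0 from e1
   (0xfffff); since DESC_G_MASK is set, the limit is scaled by the 4 KB
   granularity to 0xffffffff. */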

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (uint8_t *)(selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
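
/* Worked example (not in the original source): in a 32-bit TSS (shift = 1),
   index = (dpl * 4 + 2) << 1, so dpl=0 reads ESP0 at offset 4 and SS0 at
   offset 8, dpl=1 reads ESP1/SS1 at 12/16 and dpl=2 reads ESP2/SS2 at 20/24;
   in a 16-bit TSS (shift = 0) the same formula yields the SP0/SS0 pair at
   offsets 2/4, matching the 286 TSS layout. */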

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    uint8_t *tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit: selectors are packed at stride 2 in a 286 TSS */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 2));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, env->eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        for(i = 0; i < 8; i++)
            stl_kernel(env->tr.base + (0x28 + i * 4), env->regs[i]);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, env->eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        for(i = 0; i < 8; i++)
            stw_kernel(env->tr.base + (0x12 + i * 2), env->regs[i]);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 2), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        env->cr[3] = new_cr3;
        cpu_x86_update_cr3(env);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    for(i = 0; i < 8; i++)
        env->regs[i] = new_regs[i];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first load just the selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], NULL, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = NULL;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    dt = &env->gdt;
    index = new_ldt & ~7;
    if ((index + 7) > dt->limit)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
    ptr = dt->base + index;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
    load_seg_cache_raw_dt(&env->ldt, e1, e2);

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
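
/* Worked example (not in the original source): for a 2-byte access to port
   0x3f9, the bitmap byte is at io_offset + (0x3f9 >> 3) = io_offset + 0x7f,
   and the two bytes read there are shifted right by 0x3f9 & 7 = 1; the
   access is allowed only if both of the low two bits (mask = 0x3) are
   clear, which is why a bit pair that straddles a byte boundary needs the
   two-byte load above. */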

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
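
/* Illustrative note (not in the original source): sp is updated unmasked and
   only masked at access time, so a 16-bit stack (sp_mask = 0xffff) wraps
   within the segment while the untouched high bits of ESP are merged back by
   the callers with expressions like
   ESP = (ESP & ~sp_mask) | (esp & sp_mask). */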

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int type, dpl, selector, ss_dpl, cpl, sp_mask;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL);
        if (has_error_code) {
            int mask;
            /* push the error code */
            shift = (env->segs[R_CS].flags >> DESC_B_SHIFT) & 1;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            env->regs[R_ESP] = (esp & mask) | (env->regs[R_ESP] & ~mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = NULL; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    if (shift == 1) {
        if (env->eflags & VM_MASK) {
            PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
            PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
            PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
            PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
        }
        if (new_stack) {
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    ESP = (ESP & ~sp_mask) | (esp & sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
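
/* Worked example (not in the original source): a hardware interrupt through
   a 386 interrupt gate (shift = 1) that crosses from CPL 3 to CPL 0 pushes,
   on the CPL 0 stack fetched from the TSS: old SS, old ESP, EFLAGS, old CS,
   old EIP, plus an error code for vectors such as 13, i.e. 20 or 24 bytes;
   the matching same-privilege frame is just EFLAGS/CS/EIP. */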

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (uint8_t *)(selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  unsigned int next_eip, int is_hw)
{
#ifdef DEBUG_PCALL
    if (loglevel) {
        static int count;
        fprintf(logfile, "%d: interrupt: vector=%02x error_code=%04x int=%d\n",
                count, intno, error_code, is_int);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
#if 0
        {
            int i;
            uint8_t *ptr;
            printf("       code=");
            ptr = env->segs[R_CS].base + env->eip;
            for(i = 0; i < 16; i++) {
                printf(" %02x", ldub(ptr + i));
            }
            printf("\n");
        }
#endif
        count++;
    }
#endif
    if (env->cr[0] & CR0_PE_MASK) {
        do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     unsigned int next_eip)
{
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = next_eip;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */
void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

void helper_divl_EAX_T0(uint32_t eip)
{
    unsigned int den, q, r;
    uint64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}
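
/* Illustrative note (not in the original source): this is DIV with a 64-bit
   dividend, EDX:EAX / T0 -> quotient in EAX, remainder in EDX; e.g. EDX=0,
   EAX=7, T0=2 gives EAX=3, EDX=1. The real instruction also raises #DE when
   the quotient does not fit in 32 bits; that overflow case is not checked
   here and the quotient is silently truncated. */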

void helper_idivl_EAX_T0(uint32_t eip)
{
    int den, q, r;
    int64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}

void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq((uint8_t *)A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

/* We simulate a pre-MMX pentium as in valgrind */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
/* ... */
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)

void helper_cpuid(void)
{
    if (EAX == 0) {
        EAX = 1; /* max EAX index supported */
        EBX = 0x756e6547;
        ECX = 0x6c65746e;
        EDX = 0x49656e69;
    } else if (EAX == 1) {
        int family, model, stepping;
        /* EAX = 1 info */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 1;
        stepping = 3;
#endif
        EAX = (family << 8) | (model << 4) | stepping;
        EBX = 0;
        ECX = 0;
        EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
            CPUID_TSC | CPUID_MSR | CPUID_MCE |
            CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
    }
}
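
/* Worked example (not in the original source): leaf 0 returns the vendor
   string in EBX:EDX:ECX; as little-endian ASCII, 0x756e6547 is "Genu",
   0x49656e69 is "ineI" and 0x6c65746e is "ntel", i.e. "GenuineIntel". For
   leaf 1, family 6 / model 1 / stepping 3 packs into EAX = 0x613. */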

void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = NULL;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }
    env->ldt.selector = selector;
}

void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = NULL;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->tr, e1, e2);
        e2 |= 0x00000200; /* set the busy bit */
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector, unsigned int cur_eip)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, 0);
        } else {
            cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
        }
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK)) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
            if (rpl != cpl || dpl != cpl) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    EIP = cur_eip;
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            EIP = cur_eip;
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected_T0_T1(void)
{
    int new_cs, new_eip, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    uint8_t *ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    ESP = (ESP & ~esp_mask) | (esp & esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    uint8_t *ssp, *old_ssp;

    new_cs = T0;
    new_eip = T1;
#ifdef DEBUG_PCALL
    if (loglevel) {
        fprintf(logfile, "lcall %04x:%08x\n",
                new_cs, new_eip);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

        sp = ESP;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* from this point, not restartable */
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL);
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel)
                fprintf(logfile, "ss=%04x sp=%04x param_count=%d ESP=%x\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        EIP = offset;
    }
}
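
/* Worked example (not in the original source): for a 386 call gate with
   param_count = 2 crossing from CPL 3 to CPL 0, the sequence above switches
   to the SS0:ESP0 stack from the TSS, pushes old SS and old ESP, copies the
   two 32-bit parameters from the old stack, then pushes old CS and the
   return EIP: 24 bytes on the new stack in all. */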

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    uint8_t *ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}

/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss, sp_mask;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    uint8_t *ssp;

    sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel) {
        fprintf(logfile, "lret new %04x:%08x addend=0x%x\n",
                new_cs, new_eip, addend);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }

        if ((new_ss & 3) != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_load_seg_cache(env, R_SS, new_ss,
                       get_seg_base(ss_e1, ss_e2),
                       get_seg_limit(ss_e1, ss_e2),
                       ss_e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
        /* XXX: change sp_mask according to old segment ? */
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip;
    ESP = new_esp;
}
1652

    
1653
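/* NOTE: if NT is set, iret is a return from a nested task: the CPU
   switches back to the task whose selector is stored in the back
   link field (first word) of the current TSS. */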
void helper_iret_protected(int shift)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}

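/* NOTE: cpu_x86_update_cr0()/cpu_x86_update_cr3() propagate the side
   effects of the control register write (protection/paging changes,
   TLB flush). */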
void helper_movl_crN_T0(int reg)
{
    env->cr[reg] = T0;
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env);
        break;
    case 3:
        cpu_x86_update_cr3(env);
        break;
    }
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(unsigned int addr)
{
    cpu_x86_flush_tlb(env, addr);
}

/* rdtsc */
#ifndef __i386__
uint64_t emu_time;
#endif

void helper_rdtsc(void)
{
    uint64_t val;
#ifdef __i386__
    asm("rdtsc" : "=A" (val));
#else
    /* better than nothing: the time increases */
    val = emu_time++;
#endif
    EAX = val;
    EDX = val >> 32;
}

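/* NOTE: only the sysenter MSRs are implemented. The MSR index is
   taken from ECX; since the sysenter values fit in 32 bits, only
   EAX is used and EDX is ignored on write (and zeroed on read). */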
void helper_wrmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = EAX & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = EAX;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = EAX;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        EAX = env->sysenter_cs;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_ESP:
        EAX = env->sysenter_esp;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_EIP:
        EAX = env->sysenter_eip;
        EDX = 0;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

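/* NOTE: lsl/lar/verr/verw only set ZF on success: CC_SRC is first
   computed with ZF cleared, and the flag is or'ed back in once all
   descriptor checks have passed. */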
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC |= CC_Z;
}

void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC |= CC_Z;
}

void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            return;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
    }
    CC_SRC |= CC_Z;
}

void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        return;
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
        if (!(e2 & DESC_W_MASK))
            return;
    }
    CC_SRC |= CC_Z;
}

/* FPU helpers */

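/* NOTE: the FPU stack is modelled by env->fpregs[] indexed modulo 8
   from env->fpstt (top of stack); fptags[i] is non-zero when the
   corresponding register is empty. */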
void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, (uint8_t *)A0);
}

/* BCD ops */

#define MUL10(iv) ( iv + iv + (iv << 3) )

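/* NOTE: fbld/fbst use the 80-bit packed BCD format: bytes 0-8 hold
   18 BCD digits (two per byte, low digit in the low nibble), and
   bit 7 of byte 9 is the sign. */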
void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub((uint8_t *)A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub((uint8_t *)A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    CPU86_LDouble tmp;
    int v;
    uint8_t *mem_ref, *mem_end;
    int64_t val;

    tmp = rint(ST0);
    val = (int64_t)tmp;
    mem_ref = (uint8_t *)A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);  /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

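/* NOTE: for the trigonometric helpers, arguments outside the range
   that can be reduced are flagged by setting C2 in the FPU status
   word, as the hardware does for |ST0| >= 2^63. */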
void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

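/* NOTE: fprem/fprem1 perform at most a partial reduction: C2 set
   means the reduction is incomplete and the instruction must be
   retried; on completion the low quotient bits are reported in
   C0/C1/C3. */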
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop - fptemp * dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q & 0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop - floor(fpsrcop) < ceil(fpsrcop) - fpsrcop) ?
            floor(fpsrcop) : ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop - fptemp * dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q & 0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
            -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

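/* NOTE: on an ARM host the rounding mode from env->fpuc can be
   encoded directly in the rndd instruction variants; other hosts
   fall back to rint(), i.e. the host's current rounding mode. */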
void helper_frndint(void)
{
    CPU86_LDouble a;

    a = ST0;
#ifdef __arm__
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_DOWN:
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_UP:
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_CHOP:
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));
        break;
    }
#else
    a = rint(a);
#endif
    ST0 = a;
}

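/* NOTE: the hardware fscale scales ST0 by 2^trunc(ST1); using
   pow(2.0, ST1) without truncating ST1 first is an approximation. */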
void helper_fscale(void)
{
    CPU86_LDouble fpsrcop, fptemp;

    fpsrcop = 2.0;
    fptemp = pow(fpsrcop, ST1);
    ST0 *= fptemp;
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

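/* NOTE: fxam encodes the operand class in (C3,C2,C0): NaN -> C0,
   infinity -> C2|C0, zero -> C3, denormal -> C3|C2, normal -> C2;
   C1 receives the sign bit. */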
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x500 /* Infinity */;
        else
            env->fpus |= 0x100 /* NaN */;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /* Zero */;
        else
            env->fpus |= 0x4400 /* Denormal */;
    } else {
        env->fpus |= 0x400;
    }
}

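/* NOTE: the environment image is 28 bytes in 32-bit mode and 14
   bytes in 16-bit mode; the tag word uses 2 bits per register:
   0 = valid, 1 = zero, 2 = special (NaN, infinity, denormal),
   3 = empty. */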
void helper_fstenv(uint8_t *ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i];
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0);
        stl(ptr + 16, 0);
        stl(ptr + 20, 0);
        stl(ptr + 24, 0);
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(uint8_t *ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

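/* NOTE: fsave/frstor store/load the environment image followed by
   the eight stack registers in 10-byte extended format; fsave then
   reinitializes the FPU as fninit does. */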
void helper_fsave(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

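/* NOTE: softmmu_template.h is included once per access size
   (SHIFT = log2 of the size in bytes, 0 to 3) to generate the
   softmmu load/store functions. */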
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    if (is_write && page_unprotect(addr)) {
        /* nothing more to do: the page was write protected because
           there was code in it. page_unprotect() flushed the code. */
    }

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    }
    env = saved_env;
}