Statistics
| Branch: | Revision:

root / target-i386 / helper.c @ 3ab493de

History | View | Annotate | Download (66.1 kB)

1
/*
2
 *  i386 helpers
3
 * 
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "exec.h"
21

    
22
/* PF lookup table: parity_table[b] is CC_P when byte 'b' contains an
   even number of set bits (x86 parity flag semantics), 0 otherwise. */
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
56

    
57
/* modulo 17 table: rclw_table[c] == c % 17.  16-bit RCL/RCR rotate
   through 17 bits (16 data bits + CF), so the shift count is reduced
   modulo 17. */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
64

    
65
/* modulo 9 table: rclb_table[c] == c % 9.  8-bit RCL/RCR rotate
   through 9 bits (8 data bits + CF), so the shift count is reduced
   modulo 9. */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
72

    
73
/* x87 load-constant table: 0, 1, pi, log10(2), ln(2), log2(e),
   log2(10) — presumably indexed by the translated FLDZ/FLD1/FLDPI/
   FLDLG2/FLDLN2/FLDL2E/FLDL2T ops; verify ordering against the
   translator. */
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
83
    
84
/* thread support */
85

    
86
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
87

    
88
/* Acquire the global CPU spin lock. */
void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}
92

    
93
/* Release the global CPU spin lock. */
void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
97

    
98
/* Abort execution of the current translated code and longjmp back to
   the main CPU loop through env->jmp_env. */
void cpu_loop_exit(void)
{
    /* NOTE: the register at this point must be saved by hand because
       longjmp restore them */
    /* each guest register that is mapped to a host register (its
       reg_XXX macro is defined) must be spilled back into 'env'
       before the longjmp discards the host register state */
#ifdef reg_EAX
    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_ECX
    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_EBX
    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ESP
    env->regs[R_ESP] = ESP;
#endif
#ifdef reg_EBP
    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESI
    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
    env->regs[R_EDI] = EDI;
#endif
    longjmp(env->jmp_env, 1);
}
128

    
129
/* return non zero if error */
130
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
131
                               int selector)
132
{
133
    SegmentCache *dt;
134
    int index;
135
    uint8_t *ptr;
136

    
137
    if (selector & 0x4)
138
        dt = &env->ldt;
139
    else
140
        dt = &env->gdt;
141
    index = selector & ~7;
142
    if ((index + 7) > dt->limit)
143
        return -1;
144
    ptr = dt->base + index;
145
    *e1_ptr = ldl_kernel(ptr);
146
    *e2_ptr = ldl_kernel(ptr + 4);
147
    return 0;
148
}
149
                                     
150
/* Decode the 20-bit segment limit from the descriptor words, scaling
   it by 4K pages when the granularity bit is set. */
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int raw_limit;

    raw_limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (!(e2 & DESC_G_MASK))
        return raw_limit;
    return (raw_limit << 12) | 0xfff;
}
158

    
159
/* Reassemble the 32-bit segment base scattered across the descriptor:
   e1[31:16] -> base[15:0], e2[7:0] -> base[23:16], e2[31:24] -> base[31:24]. */
static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
{
    uint32_t base;

    base = e1 >> 16;
    base |= (e2 & 0xff) << 16;
    base |= e2 & 0xff000000;
    return (uint8_t *)base;
}
163

    
164
/* Fill a segment cache directly from the two descriptor words,
   without performing any protection checks. */
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
170

    
171
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    /* vm86 segmentation: base = selector << 4, 64K limit, no
       descriptor attributes */
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector, 
                           (uint8_t *)(selector << 4), 0xffff, 0);
}
178

    
179
/* Read the stack pointer (SS:ESP) for privilege level 'dpl' out of the
   current task's TSS, handling both 16-bit and 32-bit TSS layouts.
   Raises #TS if the entry lies outside the TSS limit; aborts on an
   invalid task register. */
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr, 
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;
    
#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    /* TSS descriptor types have (type & 7) == 1 (available/busy,
       16 or 32 bit) */
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    /* bit 3 of the type selects the 32-bit layout, whose entries are
       twice as wide */
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        /* 16-bit TSS: 2-byte SP followed by 2-byte SS */
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        /* 32-bit TSS: 4-byte ESP followed by the 16-bit SS selector */
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
213

    
214
/* XXX: merge with load_seg() */
/* Load segment register 'seg_reg' with 'selector' during a task
   switch, applying the task-switch protection checks.  Violations are
   reported as #TS (or #NP for a not-present segment) with the selector
   as the error code. */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        /* must be a code or data (non-system) descriptor */
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            /* CS must be code, with DPL matching RPL (conforming code
               additionally may not be more privileged than RPL) */
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
                
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        /* not-present segments fault with #NP rather than #TS */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* a null selector is legal for data segments but not CS/SS */
        if (seg_reg == R_SS || seg_reg == R_CS) 
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
263

    
264
#define SWITCH_TSS_JMP  0
265
#define SWITCH_TSS_IRET 1
266
#define SWITCH_TSS_CALL 2
267

    
268
/* XXX: restore CPU state in registers (PowerPC case) */
269
/* Perform a hardware task switch to the TSS selected by 'tss_selector'
   (descriptor words e1/e2, possibly via a task gate).  'source' is one
   of SWITCH_TSS_JMP/IRET/CALL and controls the busy-bit and NT flag
   handling.  Protection violations raise the architectural exception.

   Fixes over the previous version:
   - the dummy pre-access used ldub() instead of ldub_kernel();
   - the GDT descriptor address was computed as (selector << 3) instead
     of (selector & ~7) when toggling the busy bit;
   - the old task's state was saved using the NEW task's TSS format and
     the 16-bit path stored new_eip instead of the current EIP;
   - a null LDT selector now leaves the LDT invalid instead of fetching
     GDT entry 0. */
static void switch_tss(int tss_selector, 
                       uint32_t e1, uint32_t e2, int source)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    uint8_t *tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    /* minimum TSS size: 104 bytes for 32 bit, 44 bytes for 16 bit */
    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 || 
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: new_trap (the T debug bit) is read but not acted upon */
    
    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    /* kernel-mode access, matching the stb_kernel() write-back below */
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);
    
    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        uint8_t *ptr;
        uint32_t e2;
        /* the selector already encodes index * 8, so the descriptor
           address is base + (selector & ~7) */
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;
    
    /* save the current state in the old TSS: the layout is dictated by
       the OLD task's type, and the saved EIP is the current one */
    if (old_type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, env->eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        for(i = 0; i < 8; i++)
            stl_kernel(env->tr.base + (0x28 + i * 4), env->regs[i]);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, env->eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        for(i = 0; i < 8; i++)
            stw_kernel(env->tr.base + (0x12 + i * 2), env->regs[i]);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }
    
    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        /* back link to the old task + nested-task flag */
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
    
    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        env->cr[3] = new_cr3;
        cpu_x86_update_cr3(env);
    }
    
    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = FL_UPDATE_CPL0_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    for(i = 0; i < 8; i++)
        env->regs[i] = new_regs[i];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++) 
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], NULL, 0, 0);
    }
    
    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = NULL;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    /* a null LDT selector simply leaves the LDT invalid */
    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        /* must be a present system descriptor of LDT type (2) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }
    
    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
476

    
477
/* check if Port I/O is allowed in TSS */
478
static inline void check_io(int addr, int size)
479
{
480
    int io_offset, val, mask;
481
    
482
    /* TSS must be a valid 32 bit one */
483
    if (!(env->tr.flags & DESC_P_MASK) ||
484
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
485
        env->tr.limit < 103)
486
        goto fail;
487
    io_offset = lduw_kernel(env->tr.base + 0x66);
488
    io_offset += (addr >> 3);
489
    /* Note: the check needs two bytes */
490
    if ((io_offset + 1) > env->tr.limit)
491
        goto fail;
492
    val = lduw_kernel(env->tr.base + io_offset);
493
    val >>= (addr & 7);
494
    mask = (1 << size) - 1;
495
    /* all bits must be zero to allow the I/O */
496
    if ((val & mask) != 0) {
497
    fail:
498
        raise_exception_err(EXCP0D_GPF, 0);
499
    }
500
}
501

    
502
/* byte I/O permission check with the port number in T0 */
void check_iob_T0(void)
{
    check_io(T0, 1);
}
506

    
507
/* word I/O permission check with the port number in T0 */
void check_iow_T0(void)
{
    check_io(T0, 2);
}
511

    
512
/* dword I/O permission check with the port number in T0 */
void check_iol_T0(void)
{
    check_io(T0, 4);
}
516

    
517
/* byte I/O permission check with the port number in DX */
void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}
521

    
522
/* word I/O permission check with the port number in DX */
void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}
526

    
527
/* dword I/O permission check with the port number in DX */
void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}
531

    
532
/* protected mode interrupt */
/* Deliver interrupt/exception 'intno' in protected mode: walk the IDT,
   dispatch through a task/interrupt/trap gate, optionally switch to an
   inner-privilege stack, and push the return frame (plus vm86 segment
   registers and error code when applicable).  is_int selects software
   INT semantics; is_hw marks hardware interrupts; next_eip is the
   address after the INT instruction. */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
    uint32_t old_cs, old_ss, old_esp, old_eip;

    /* exceptions 8,10-14,17 (#DF,#TS,#NP,#SS,#GP,#PF,#AC) push an
       error code; software INT and hardware interrupts never do */
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL);
        if (has_error_code) {
            int mask;
            /* push the error code on the NEW task's stack */
            shift = (env->segs[R_CS].flags >> DESC_B_SHIFT) & 1;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            env->regs[R_ESP] = (esp & mask) | (env->regs[R_ESP] & ~mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    /* target must be a present code segment no more privileged than
       the caller */
    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege: fetch the new SS:ESP from the TSS and
           validate it */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        new_stack = 0;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
    }

    /* frame size: EIP/CS/EFLAGS (+SS:ESP on stack switch, +error code,
       +the four vm86 data segment registers), in 2- or 4-byte slots
       depending on the gate size */
    shift = type >> 3;
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;

    /* XXX: check that enough room is available */
    if (new_stack) {
        old_esp = ESP;
        old_ss = env->segs[R_SS].selector;
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 
                       get_seg_base(ss_e1, ss_e2),
                       get_seg_limit(ss_e1, ss_e2),
                       ss_e2);
    } else {
        old_esp = 0;
        old_ss = 0;
        esp = ESP;
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector, 
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;
    ESP = esp - push_size;
    ssp = env->segs[R_SS].base + esp;
    if (shift == 1) {
        /* 32-bit gate: 4-byte pushes */
        int old_eflags;
        if (env->eflags & VM_MASK) {
            ssp -= 4;
            stl_kernel(ssp, env->segs[R_GS].selector);
            ssp -= 4;
            stl_kernel(ssp, env->segs[R_FS].selector);
            ssp -= 4;
            stl_kernel(ssp, env->segs[R_DS].selector);
            ssp -= 4;
            stl_kernel(ssp, env->segs[R_ES].selector);
        }
        if (new_stack) {
            ssp -= 4;
            stl_kernel(ssp, old_ss);
            ssp -= 4;
            stl_kernel(ssp, old_esp);
        }
        ssp -= 4;
        old_eflags = compute_eflags();
        stl_kernel(ssp, old_eflags);
        ssp -= 4;
        stl_kernel(ssp, old_cs);
        ssp -= 4;
        stl_kernel(ssp, old_eip);
        if (has_error_code) {
            ssp -= 4;
            stl_kernel(ssp, error_code);
        }
    } else {
        /* 16-bit gate: 2-byte pushes */
        if (new_stack) {
            ssp -= 2;
            stw_kernel(ssp, old_ss);
            ssp -= 2;
            stw_kernel(ssp, old_esp);
        }
        ssp -= 2;
        stw_kernel(ssp, compute_eflags());
        ssp -= 2;
        stw_kernel(ssp, old_cs);
        ssp -= 2;
        stw_kernel(ssp, old_eip);
        if (has_error_code) {
            ssp -= 2;
            stw_kernel(ssp, error_code);
        }
    }
    
    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
735

    
736
/* real mode interrupt */
737
static void do_interrupt_real(int intno, int is_int, int error_code,
738
                                 unsigned int next_eip)
739
{
740
    SegmentCache *dt;
741
    uint8_t *ptr, *ssp;
742
    int selector;
743
    uint32_t offset, esp;
744
    uint32_t old_cs, old_eip;
745

    
746
    /* real mode (simpler !) */
747
    dt = &env->idt;
748
    if (intno * 4 + 3 > dt->limit)
749
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
750
    ptr = dt->base + intno * 4;
751
    offset = lduw_kernel(ptr);
752
    selector = lduw_kernel(ptr + 2);
753
    esp = ESP;
754
    ssp = env->segs[R_SS].base;
755
    if (is_int)
756
        old_eip = next_eip;
757
    else
758
        old_eip = env->eip;
759
    old_cs = env->segs[R_CS].selector;
760
    esp -= 2;
761
    stw_kernel(ssp + (esp & 0xffff), compute_eflags());
762
    esp -= 2;
763
    stw_kernel(ssp + (esp & 0xffff), old_cs);
764
    esp -= 2;
765
    stw_kernel(ssp + (esp & 0xffff), old_eip);
766
    
767
    /* update processor state */
768
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
769
    env->eip = offset;
770
    env->segs[R_CS].selector = selector;
771
    env->segs[R_CS].base = (uint8_t *)(selector << 4);
772
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
773
}
774

    
775
/* fake user mode interrupt */
776
void do_interrupt_user(int intno, int is_int, int error_code, 
777
                       unsigned int next_eip)
778
{
779
    SegmentCache *dt;
780
    uint8_t *ptr;
781
    int dpl, cpl;
782
    uint32_t e2;
783

    
784
    dt = &env->idt;
785
    ptr = dt->base + (intno * 8);
786
    e2 = ldl_kernel(ptr + 4);
787
    
788
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
789
    cpl = env->hflags & HF_CPL_MASK;
790
    /* check privledge if software int */
791
    if (is_int && dpl < cpl)
792
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
793

    
794
    /* Since we emulate only user space, we cannot do more than
795
       exiting the emulation with the suitable exception and error
796
       code */
797
    if (is_int)
798
        EIP = next_eip;
799
}
800

    
801
/*
802
 * Begin excution of an interruption. is_int is TRUE if coming from
803
 * the int instruction. next_eip is the EIP value AFTER the interrupt
804
 * instruction. It is only relevant if is_int is TRUE.  
805
 */
806
void do_interrupt(int intno, int is_int, int error_code, 
807
                  unsigned int next_eip, int is_hw)
808
{
809
    if (env->cr[0] & CR0_PE_MASK) {
810
        do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
811
    } else {
812
        do_interrupt_real(intno, is_int, error_code, next_eip);
813
    }
814
}
815

    
816
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code, 
                     unsigned int next_eip)
{
    /* record the pending exception in 'env' and unwind to the main
       loop, which performs the actual delivery */
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = next_eip;
    cpu_loop_exit(); /* does not return */
}
831

    
832
/* shortcuts to generate exceptions */
/* Raise a CPU exception carrying an error code (never a software
   interrupt, hence is_int = 0). */
void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
837

    
838
/* Raise a CPU exception with no error code. */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
842

    
843
#ifdef BUGGY_GCC_DIV64
844
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
845
   call it from another function */
846
/* Out-of-line 64/32 unsigned division (gcc 2.95.4/PowerPC __udivdi3
   workaround): stores the (truncated) quotient through q_ptr and
   returns the remainder. */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    uint64_t quot = num / den;
    uint64_t rem = num % den;

    *q_ptr = quot;
    return rem;
}
851

    
852
/* Out-of-line 64/32 signed division (gcc 2.95.4/PowerPC workaround):
   stores the (truncated) quotient through q_ptr and returns the
   remainder. */
int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
{
    int64_t quot = num / den;
    int64_t rem = num % den;

    *q_ptr = quot;
    return rem;
}
857
#endif
858

    
859
/* DIV r/m32 helper: divide the 64-bit value EDX:EAX by T0, quotient to
   EAX, remainder to EDX.  Raises #DE both on division by zero and on
   quotient overflow, matching the hardware (the previous version
   silently truncated an overflowing quotient). */
void helper_divl_EAX_T0(uint32_t eip)
{
    unsigned int den, q, r;
    uint64_t num;
    
    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    /* for unsigned division the quotient overflows 32 bits exactly
       when the high half of the dividend is >= the divisor; this test
       also covers den == 0 */
    if ((uint32_t)(num >> 32) >= den) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}
879

    
880
/* IDIV r/m32 helper: signed divide of EDX:EAX by T0, quotient to EAX,
   remainder to EDX.  Raises #DE on division by zero and on quotient
   overflow (including INT64_MIN / -1, which would also be undefined
   behavior in C); the previous version silently truncated. */
void helper_idivl_EAX_T0(uint32_t eip)
{
    int den, q, r;
    int64_t num;
    
    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0 || (den == -1 && num == (-9223372036854775807LL - 1))) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    /* NOTE: idiv64() only returns a truncated 32-bit quotient, so
       quotient overflow cannot be detected on this configuration */
    r = idiv64(&q, num, den);
#else
    {
        int64_t q64 = num / den;
        /* the hardware raises #DE when the quotient does not fit in
           32 bits */
        if (q64 != (int32_t)q64) {
            EIP = eip;
            raise_exception(EXCP00_DIVZ);
        }
        q = q64;
        r = num % den;
    }
#endif
    EAX = q;
    EDX = r;
}
900

    
901
void helper_cmpxchg8b(void)
902
{
903
    uint64_t d;
904
    int eflags;
905

    
906
    eflags = cc_table[CC_OP].compute_all();
907
    d = ldq((uint8_t *)A0);
908
    if (d == (((uint64_t)EDX << 32) | EAX)) {
909
        stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
910
        eflags |= CC_Z;
911
    } else {
912
        EDX = d >> 32;
913
        EAX = d;
914
        eflags &= ~CC_Z;
915
    }
916
    CC_SRC = eflags;
917
}
918

    
919
/* We simulate a pre-MMX pentium as in valgrind */
/* CPUID.01H:EDX feature flag bit positions (Intel-defined); only a subset
   of these is actually reported by helper_cpuid() below */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
/* ... */
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)
940

    
941
void helper_cpuid(void)
942
{
943
    if (EAX == 0) {
944
        EAX = 1; /* max EAX index supported */
945
        EBX = 0x756e6547;
946
        ECX = 0x6c65746e;
947
        EDX = 0x49656e69;
948
    } else if (EAX == 1) {
949
        int family, model, stepping;
950
        /* EAX = 1 info */
951
#if 0
952
        /* pentium 75-200 */
953
        family = 5;
954
        model = 2;
955
        stepping = 11;
956
#else
957
        /* pentium pro */
958
        family = 6;
959
        model = 1;
960
        stepping = 3;
961
#endif
962
        EAX = (family << 8) | (model << 4) | stepping;
963
        EBX = 0;
964
        ECX = 0;
965
        EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
966
            CPUID_TSC | CPUID_MSR | CPUID_MCE |
967
            CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
968
    }
969
}
970

    
971
void helper_lldt_T0(void)
972
{
973
    int selector;
974
    SegmentCache *dt;
975
    uint32_t e1, e2;
976
    int index;
977
    uint8_t *ptr;
978
    
979
    selector = T0 & 0xffff;
980
    if ((selector & 0xfffc) == 0) {
981
        /* XXX: NULL selector case: invalid LDT */
982
        env->ldt.base = NULL;
983
        env->ldt.limit = 0;
984
    } else {
985
        if (selector & 0x4)
986
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
987
        dt = &env->gdt;
988
        index = selector & ~7;
989
        if ((index + 7) > dt->limit)
990
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
991
        ptr = dt->base + index;
992
        e1 = ldl_kernel(ptr);
993
        e2 = ldl_kernel(ptr + 4);
994
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
995
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
996
        if (!(e2 & DESC_P_MASK))
997
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
998
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
999
    }
1000
    env->ldt.selector = selector;
1001
}
1002

    
1003
void helper_ltr_T0(void)
1004
{
1005
    int selector;
1006
    SegmentCache *dt;
1007
    uint32_t e1, e2;
1008
    int index, type;
1009
    uint8_t *ptr;
1010
    
1011
    selector = T0 & 0xffff;
1012
    if ((selector & 0xfffc) == 0) {
1013
        /* NULL selector case: invalid LDT */
1014
        env->tr.base = NULL;
1015
        env->tr.limit = 0;
1016
        env->tr.flags = 0;
1017
    } else {
1018
        if (selector & 0x4)
1019
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1020
        dt = &env->gdt;
1021
        index = selector & ~7;
1022
        if ((index + 7) > dt->limit)
1023
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1024
        ptr = dt->base + index;
1025
        e1 = ldl_kernel(ptr);
1026
        e2 = ldl_kernel(ptr + 4);
1027
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1028
        if ((e2 & DESC_S_MASK) || 
1029
            (type != 1 && type != 9))
1030
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1031
        if (!(e2 & DESC_P_MASK))
1032
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1033
        load_seg_cache_raw_dt(&env->tr, e1, e2);
1034
        e2 |= 0x00000200; /* set the busy bit */
1035
        stl_kernel(ptr + 4, e2);
1036
    }
1037
    env->tr.selector = selector;
1038
}
1039

    
1040
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
/* Load a data/stack segment register (ES/SS/DS/FS/GS) with 'selector',
   performing the protected-mode descriptor checks.  On any failure, EIP
   is rewound to 'cur_eip' before the exception is raised so the fault
   points at the instruction that loaded the segment. */
void load_seg(int seg_reg, int selector, unsigned int cur_eip)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS) {
            /* SS may never be null */
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, 0);
        } else {
            /* a null selector is legal for the other segment registers;
               it just invalidates the cached descriptor */
            cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
        }
    } else {
        /* TI bit selects LDT vs GDT */
        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }
        /* fetch the two descriptor words */
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        /* system descriptors cannot be loaded into data segment registers */
        if (!(e2 & DESC_S_MASK)) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
            /* SS additionally requires rpl == dpl == cpl */
            if (rpl != cpl || dpl != cpl) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
            
            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    EIP = cur_eip;
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
                }
            }
        }

        /* not-present segment: #SS for stack loads, #NP otherwise */
        if (!(e2 & DESC_P_MASK)) {
            EIP = cur_eip;
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector, 
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", 
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
1129

    
1130
/* protected mode jump */
/* Far JMP in protected mode: T0 holds the new CS selector, T1 the new EIP.
   Handles direct jumps to code segments as well as jumps through task
   gates / TSS descriptors and call gates.  Raises #GP/#NP with the
   offending selector as the error code on any privilege or type failure. */
void helper_ljmp_protected_T0_T1(void)
{
    int new_cs, new_eip, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        /* direct jump: target must be a code segment */
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* CPL is preserved: the loaded selector keeps the current cpl */
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            /* the gate's high word of e1 holds the target CS selector */
            gate_cs = e1 >> 16;
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != 
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            /* conforming: dpl <= cpl; non-conforming: dpl == cpl */
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            /* a 286 gate only supplies a 16-bit offset; a 386 gate adds
               the high 16 bits from e2 */
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
1216

    
1217
/* real mode call */
1218
void helper_lcall_real_T0_T1(int shift, int next_eip)
1219
{
1220
    int new_cs, new_eip;
1221
    uint32_t esp, esp_mask;
1222
    uint8_t *ssp;
1223

    
1224
    new_cs = T0;
1225
    new_eip = T1;
1226
    esp = ESP;
1227
    esp_mask = 0xffffffff;
1228
    if (!(env->segs[R_SS].flags & DESC_B_MASK))
1229
        esp_mask = 0xffff;
1230
    ssp = env->segs[R_SS].base;
1231
    if (shift) {
1232
        esp -= 4;
1233
        stl_kernel(ssp + (esp & esp_mask), env->segs[R_CS].selector);
1234
        esp -= 4;
1235
        stl_kernel(ssp + (esp & esp_mask), next_eip);
1236
    } else {
1237
        esp -= 2;
1238
        stw_kernel(ssp + (esp & esp_mask), env->segs[R_CS].selector);
1239
        esp -= 2;
1240
        stw_kernel(ssp + (esp & esp_mask), next_eip);
1241
    }
1242

    
1243
    if (!(env->segs[R_SS].flags & DESC_B_MASK))
1244
        ESP = (ESP & ~0xffff) | (esp & 0xffff);
1245
    else
1246
        ESP = esp;
1247
    env->eip = new_eip;
1248
    env->segs[R_CS].selector = new_cs;
1249
    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
1250
}
1251

    
1252
/* protected mode call */
/* Far CALL in protected mode: T0 holds the target CS selector, T1 the
   target EIP, 'shift' the operand size (0 = 16-bit, 1 = 32-bit) and
   'next_eip' the return address.  Handles direct calls to code segments,
   task switches through TSS/task-gate descriptors, and call gates with
   optional privilege transition (inner-stack switch + parameter copy). */
void helper_lcall_protected_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, push_size, sp, type, ss_dpl;
    uint32_t old_ss, old_esp, val, i, limit;
    uint8_t *ssp, *old_ssp;
    
    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        /* direct call to a code segment */
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

        /* push the return CS:EIP on the current stack */
        sp = ESP;
        if (!(env->segs[R_SS].flags & DESC_B_MASK))
            sp &= 0xffff;
        ssp = env->segs[R_SS].base + sp;
        if (shift) {
            ssp -= 4;
            stl_kernel(ssp, env->segs[R_CS].selector);
            ssp -= 4;
            stl_kernel(ssp, next_eip);
        } else {
            ssp -= 2;
            stw_kernel(ssp, env->segs[R_CS].selector);
            ssp -= 2;
            stw_kernel(ssp, next_eip);
        }
        sp -= (4 << shift);
        
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* from this point, not restartable */
        if (!(env->segs[R_SS].flags & DESC_B_MASK))
            ESP = (ESP & 0xffff0000) | (sp & 0xffff);
        else
            ESP = sp;
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        /* bit 3 of the gate type distinguishes 286 (0) from 386 (1)
           gates; reuse it as the operand-size shift for the pushes */
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
        /* gate payload: target selector in e1[31:16], offset split
           between e1[15:0] and e2[31:16] */
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner priviledge */
            /* fetch the inner stack SS:ESP for the target privilege
               level from the current TSS */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            
            /* bytes to reserve on the new stack: params + old SS:ESP +
               return CS:EIP */
            param_count = e2 & 0x1f;
            push_size = ((param_count * 2) + 8) << shift;

            old_esp = ESP;
            old_ss = env->segs[R_SS].selector;
            if (!(env->segs[R_SS].flags & DESC_B_MASK))
                old_esp &= 0xffff;
            old_ssp = env->segs[R_SS].base + old_esp;
            
            /* XXX: from this point not restartable */
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss, 
                           get_seg_base(ss_e1, ss_e2),
                           get_seg_limit(ss_e1, ss_e2),
                           ss_e2);

            /* push old SS:ESP, then copy the gate parameters from the
               outer stack to the inner stack */
            if (!(env->segs[R_SS].flags & DESC_B_MASK))
                sp &= 0xffff;
            ssp = env->segs[R_SS].base + sp;
            if (shift) {
                ssp -= 4;
                stl_kernel(ssp, old_ss);
                ssp -= 4;
                stl_kernel(ssp, old_esp);
                ssp -= 4 * param_count;
                for(i = 0; i < param_count; i++) {
                    val = ldl_kernel(old_ssp + i * 4);
                    stl_kernel(ssp + i * 4, val);
                }
            } else {
                ssp -= 2;
                stw_kernel(ssp, old_ss);
                ssp -= 2;
                stw_kernel(ssp, old_esp);
                ssp -= 2 * param_count;
                for(i = 0; i < param_count; i++) {
                    val = lduw_kernel(old_ssp + i * 2);
                    stw_kernel(ssp + i * 2, val);
                }
            }
        } else {
            /* to same priviledge */
            /* NOTE(review): 'sp' has not been assigned on this path (it is
               only set in the DESC_S_MASK branch and by
               get_ss_esp_from_tss above) — it looks like it should be
               loaded from ESP here; verify against a later revision */
            if (!(env->segs[R_SS].flags & DESC_B_MASK))
                sp &= 0xffff;
            ssp = env->segs[R_SS].base + sp;
            push_size = (4 << shift);
        }

        /* push the return CS:EIP on the (possibly new) stack */
        if (shift) {
            ssp -= 4;
            stl_kernel(ssp, env->segs[R_CS].selector);
            ssp -= 4;
            stl_kernel(ssp, next_eip);
        } else {
            ssp -= 2;
            stw_kernel(ssp, env->segs[R_CS].selector);
            ssp -= 2;
            stw_kernel(ssp, next_eip);
        }

        sp -= push_size;
        /* enter the target code segment at the gate's privilege level */
        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector, 
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        
        /* from this point, not restartable if same priviledge */
        if (!(env->segs[R_SS].flags & DESC_B_MASK))
            ESP = (ESP & 0xffff0000) | (sp & 0xffff);
        else
            ESP = sp;
        EIP = offset;
    }
}
1452

    
1453
/* real and vm86 mode iret */
1454
void helper_iret_real(int shift)
1455
{
1456
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp;
1457
    uint8_t *ssp;
1458
    int eflags_mask;
1459

    
1460
    sp = ESP & 0xffff;
1461
    ssp = env->segs[R_SS].base + sp;
1462
    if (shift == 1) {
1463
        /* 32 bits */
1464
        new_eflags = ldl_kernel(ssp + 8);
1465
        new_cs = ldl_kernel(ssp + 4) & 0xffff;
1466
        new_eip = ldl_kernel(ssp) & 0xffff;
1467
    } else {
1468
        /* 16 bits */
1469
        new_eflags = lduw_kernel(ssp + 4);
1470
        new_cs = lduw_kernel(ssp + 2);
1471
        new_eip = lduw_kernel(ssp);
1472
    }
1473
    new_esp = sp + (6 << shift);
1474
    ESP = (ESP & 0xffff0000) | 
1475
        (new_esp & 0xffff);
1476
    load_seg_vm(R_CS, new_cs);
1477
    env->eip = new_eip;
1478
    if (env->eflags & VM_MASK)
1479
        eflags_mask = FL_UPDATE_MASK32 | IF_MASK | RF_MASK;
1480
    else
1481
        eflags_mask = FL_UPDATE_CPL0_MASK;
1482
    if (shift == 0)
1483
        eflags_mask &= 0xffff;
1484
    load_eflags(new_eflags, eflags_mask);
1485
}
1486

    
1487
/* protected mode iret */
/* Common implementation of protected-mode RETF (is_iret == 0) and IRET
   (is_iret == 1).  'shift' selects 16-bit (0) or 32-bit (1) frame layout,
   'addend' is the extra byte count popped by "ret n".  Handles return to
   the same privilege level, return to an outer privilege level (with a
   stack switch), and IRET back into VM86 mode. */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask;
    uint8_t *ssp;
    
    sp = ESP;
    if (!(env->segs[R_SS].flags & DESC_B_MASK))
        sp &= 0xffff;
    ssp = env->segs[R_SS].base + sp;
    if (shift == 1) {
        /* 32 bits */
        if (is_iret)
            new_eflags = ldl_kernel(ssp + 8);
        new_cs = ldl_kernel(ssp + 4) & 0xffff;
        new_eip = ldl_kernel(ssp);
        /* IRET with VM set in the popped EFLAGS returns to VM86 mode */
        if (is_iret && (new_eflags & VM_MASK))
            goto return_to_vm86;
    } else {
        /* 16 bits */
        if (is_iret)
            new_eflags = lduw_kernel(ssp + 4);
        new_cs = lduw_kernel(ssp + 2);
        new_eip = lduw_kernel(ssp);
    }
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    /* return target must be a code segment */
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3; 
    /* a return may never increase privilege */
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        /* conforming: dpl <= rpl */
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        /* non-conforming: dpl == rpl */
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
    
    if (rpl == cpl) {
        /* return to same priledge level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs, 
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        /* pop EIP+CS (+EFLAGS for iret) plus the "ret n" addend */
        new_esp = sp + (4 << shift) + ((2 * is_iret) << shift) + addend;
    } else {
        /* return to different priviledge level */
        /* the outer SS:ESP sit just above the popped frame */
        ssp += (4 << shift) + ((2 * is_iret) << shift) + addend;
        if (shift == 1) {
            /* 32 bits */
            new_esp = ldl_kernel(ssp);
            new_ss = ldl_kernel(ssp + 4) & 0xffff;
        } else {
            /* 16 bits */
            new_esp = lduw_kernel(ssp);
            new_ss = lduw_kernel(ssp + 2);
        }
        
        if ((new_ss & 3) != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        /* outer SS must be a writable data segment at the outer level */
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);

        cpu_x86_load_seg_cache(env, R_CS, new_cs, 
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_load_seg_cache(env, R_SS, new_ss, 
                       get_seg_base(ss_e1, ss_e2),
                       get_seg_limit(ss_e1, ss_e2),
                       ss_e2);
        cpu_x86_set_cpl(env, rpl);
    }
    if (env->segs[R_SS].flags & DESC_B_MASK)
        ESP = new_esp;
    else
        ESP = (ESP & 0xffff0000) | 
            (new_esp & 0xffff);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' can be different from the current CPL */
        if (cpl == 0)
            eflags_mask = FL_UPDATE_CPL0_MASK;
        else
            eflags_mask = FL_UPDATE_MASK32;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    /* the VM86 iret frame additionally holds ESP, SS, ES, DS, FS, GS */
    new_esp = ldl_kernel(ssp + 12);
    new_ss = ldl_kernel(ssp + 16);
    new_es = ldl_kernel(ssp + 20);
    new_ds = ldl_kernel(ssp + 24);
    new_fs = ldl_kernel(ssp + 28);
    new_gs = ldl_kernel(ssp + 32);
    
    /* modify processor state */
    load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss);
    load_seg_vm(R_ES, new_es);
    load_seg_vm(R_DS, new_ds);
    load_seg_vm(R_FS, new_fs);
    load_seg_vm(R_GS, new_gs);

    env->eip = new_eip;
    ESP = new_esp;
}
1620

    
1621
void helper_iret_protected(int shift)
1622
{
1623
    int tss_selector, type;
1624
    uint32_t e1, e2;
1625
    
1626
    /* specific case for TSS */
1627
    if (env->eflags & NT_MASK) {
1628
        tss_selector = lduw_kernel(env->tr.base + 0);
1629
        if (tss_selector & 4)
1630
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
1631
        if (load_segment(&e1, &e2, tss_selector) != 0)
1632
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
1633
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
1634
        /* NOTE: we check both segment and busy TSS */
1635
        if (type != 3)
1636
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
1637
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET);
1638
    } else {
1639
        helper_ret_protected(shift, 1, 0);
1640
    }
1641
}
1642

    
1643
/* Protected-mode far return (RETF / RETF n): thin wrapper over
   helper_ret_protected() with is_iret == 0; 'addend' is the immediate
   byte count of "ret n". */
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
1647

    
1648
void helper_movl_crN_T0(int reg)
1649
{
1650
    env->cr[reg] = T0;
1651
    switch(reg) {
1652
    case 0:
1653
        cpu_x86_update_cr0(env);
1654
        break;
1655
    case 3:
1656
        cpu_x86_update_cr3(env);
1657
        break;
1658
    }
1659
}
1660

    
1661
/* XXX: do more */
/* MOV DRn, T0: store T0 into debug register 'reg'.  The value is only
   recorded; no breakpoint side effects are implemented yet. */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}
1666

    
1667
/* INVLPG: invalidate the TLB entry covering linear address 'addr'. */
void helper_invlpg(unsigned int addr)
{
    cpu_x86_flush_tlb(env, addr);
}
1671

    
1672
/* rdtsc */
#ifndef __i386__
/* monotonically increasing counter used to fake the TSC on non-x86
   hosts (see helper_rdtsc) */
uint64_t emu_time;
#endif
1676

    
1677
/* RDTSC: return a 64-bit timestamp in EDX:EAX.  On an x86 host the real
   TSC is read with the rdtsc instruction; elsewhere a software counter
   is incremented so guests at least observe increasing time. */
void helper_rdtsc(void)
{
    uint64_t val;
#ifdef __i386__
    asm("rdtsc" : "=A" (val));
#else
    /* better than nothing: the time increases */
    val = emu_time++;
#endif
    EAX = val;
    EDX = val >> 32;
}
1689

    
1690
void helper_wrmsr(void)
1691
{
1692
    switch(ECX) {
1693
    case MSR_IA32_SYSENTER_CS:
1694
        env->sysenter_cs = EAX & 0xffff;
1695
        break;
1696
    case MSR_IA32_SYSENTER_ESP:
1697
        env->sysenter_esp = EAX;
1698
        break;
1699
    case MSR_IA32_SYSENTER_EIP:
1700
        env->sysenter_eip = EAX;
1701
        break;
1702
    default:
1703
        /* XXX: exception ? */
1704
        break; 
1705
    }
1706
}
1707

    
1708
void helper_rdmsr(void)
1709
{
1710
    switch(ECX) {
1711
    case MSR_IA32_SYSENTER_CS:
1712
        EAX = env->sysenter_cs;
1713
        EDX = 0;
1714
        break;
1715
    case MSR_IA32_SYSENTER_ESP:
1716
        EAX = env->sysenter_esp;
1717
        EDX = 0;
1718
        break;
1719
    case MSR_IA32_SYSENTER_EIP:
1720
        EAX = env->sysenter_eip;
1721
        EDX = 0;
1722
        break;
1723
    default:
1724
        /* XXX: exception ? */
1725
        break; 
1726
    }
1727
}
1728

    
1729
void helper_lsl(void)
1730
{
1731
    unsigned int selector, limit;
1732
    uint32_t e1, e2;
1733
    int rpl, dpl, cpl, type;
1734

    
1735
    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1736
    selector = T0 & 0xffff;
1737
    if (load_segment(&e1, &e2, selector) != 0)
1738
        return;
1739
    rpl = selector & 3;
1740
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1741
    cpl = env->hflags & HF_CPL_MASK;
1742
    if (e2 & DESC_S_MASK) {
1743
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
1744
            /* conforming */
1745
        } else {
1746
            if (dpl < cpl || dpl < rpl)
1747
                return;
1748
        }
1749
    } else {
1750
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1751
        switch(type) {
1752
        case 1:
1753
        case 2:
1754
        case 3:
1755
        case 9:
1756
        case 11:
1757
            break;
1758
        default:
1759
            return;
1760
        }
1761
        if (dpl < cpl || dpl < rpl)
1762
            return;
1763
    }
1764
    limit = get_seg_limit(e1, e2);
1765
    T1 = limit;
1766
    CC_SRC |= CC_Z;
1767
}
1768

    
1769
void helper_lar(void)
1770
{
1771
    unsigned int selector;
1772
    uint32_t e1, e2;
1773
    int rpl, dpl, cpl, type;
1774

    
1775
    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1776
    selector = T0 & 0xffff;
1777
    if ((selector & 0xfffc) == 0)
1778
        return;
1779
    if (load_segment(&e1, &e2, selector) != 0)
1780
        return;
1781
    rpl = selector & 3;
1782
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1783
    cpl = env->hflags & HF_CPL_MASK;
1784
    if (e2 & DESC_S_MASK) {
1785
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
1786
            /* conforming */
1787
        } else {
1788
            if (dpl < cpl || dpl < rpl)
1789
                return;
1790
        }
1791
    } else {
1792
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1793
        switch(type) {
1794
        case 1:
1795
        case 2:
1796
        case 3:
1797
        case 4:
1798
        case 5:
1799
        case 9:
1800
        case 11:
1801
        case 12:
1802
            break;
1803
        default:
1804
            return;
1805
        }
1806
        if (dpl < cpl || dpl < rpl)
1807
            return;
1808
    }
1809
    T1 = e2 & 0x00f0ff00;
1810
    CC_SRC |= CC_Z;
1811
}
1812

    
1813
void helper_verr(void)
1814
{
1815
    unsigned int selector;
1816
    uint32_t e1, e2;
1817
    int rpl, dpl, cpl;
1818

    
1819
    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1820
    selector = T0 & 0xffff;
1821
    if ((selector & 0xfffc) == 0)
1822
        return;
1823
    if (load_segment(&e1, &e2, selector) != 0)
1824
        return;
1825
    if (!(e2 & DESC_S_MASK))
1826
        return;
1827
    rpl = selector & 3;
1828
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1829
    cpl = env->hflags & HF_CPL_MASK;
1830
    if (e2 & DESC_CS_MASK) {
1831
        if (!(e2 & DESC_R_MASK))
1832
            return;
1833
        if (!(e2 & DESC_C_MASK)) {
1834
            if (dpl < cpl || dpl < rpl)
1835
                return;
1836
        }
1837
    } else {
1838
        if (dpl < cpl || dpl < rpl)
1839
            return;
1840
    }
1841
    /* ok */
1842
}
1843

    
1844
void helper_verw(void)
1845
{
1846
    unsigned int selector;
1847
    uint32_t e1, e2;
1848
    int rpl, dpl, cpl;
1849

    
1850
    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1851
    selector = T0 & 0xffff;
1852
    if ((selector & 0xfffc) == 0)
1853
        return;
1854
    if (load_segment(&e1, &e2, selector) != 0)
1855
        return;
1856
    if (!(e2 & DESC_S_MASK))
1857
        return;
1858
    rpl = selector & 3;
1859
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1860
    cpl = env->hflags & HF_CPL_MASK;
1861
    if (e2 & DESC_CS_MASK) {
1862
        return;
1863
    } else {
1864
        if (dpl < cpl || dpl < rpl)
1865
            return;
1866
        if (!(e2 & DESC_W_MASK))
1867
            return;
1868
    }
1869
    /* ok */
1870
}
1871

    
1872
/* FPU helpers */
1873

    
1874
void helper_fldt_ST0_A0(void)
1875
{
1876
    int new_fpstt;
1877
    new_fpstt = (env->fpstt - 1) & 7;
1878
    env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
1879
    env->fpstt = new_fpstt;
1880
    env->fptags[new_fpstt] = 0; /* validate stack entry */
1881
}
1882

    
1883
void helper_fstt_ST0_A0(void)
1884
{
1885
    helper_fstt(ST0, (uint8_t *)A0);
1886
}
1887

    
1888
/* BCD ops */
1889

    
1890
/* Multiply by 10 via shift/add: 10*x == 2*x + 8*x.
   The argument is fully parenthesized so that expressions such as
   MUL10(c ? a : b) expand correctly (the old form did not parenthesize
   iv, breaking any argument containing a lower-precedence operator). */
#define MUL10(iv) ( (iv) + (iv) + ((iv) << 3) )
1891

    
1892
void helper_fbld_ST0_A0(void)
1893
{
1894
    CPU86_LDouble tmp;
1895
    uint64_t val;
1896
    unsigned int v;
1897
    int i;
1898

    
1899
    val = 0;
1900
    for(i = 8; i >= 0; i--) {
1901
        v = ldub((uint8_t *)A0 + i);
1902
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
1903
    }
1904
    tmp = val;
1905
    if (ldub((uint8_t *)A0 + 9) & 0x80)
1906
        tmp = -tmp;
1907
    fpush();
1908
    ST0 = tmp;
1909
}
1910

    
1911
void helper_fbst_ST0_A0(void)
1912
{
1913
    CPU86_LDouble tmp;
1914
    int v;
1915
    uint8_t *mem_ref, *mem_end;
1916
    int64_t val;
1917

    
1918
    tmp = rint(ST0);
1919
    val = (int64_t)tmp;
1920
    mem_ref = (uint8_t *)A0;
1921
    mem_end = mem_ref + 9;
1922
    if (val < 0) {
1923
        stb(mem_end, 0x80);
1924
        val = -val;
1925
    } else {
1926
        stb(mem_end, 0x00);
1927
    }
1928
    while (mem_ref < mem_end) {
1929
        if (val == 0)
1930
            break;
1931
        v = val % 100;
1932
        val = val / 100;
1933
        v = ((v / 10) << 4) | (v % 10);
1934
        stb(mem_ref++, v);
1935
    }
1936
    while (mem_ref < mem_end) {
1937
        stb(mem_ref++, 0);
1938
    }
1939
}
1940

    
1941
void helper_f2xm1(void)
1942
{
1943
    ST0 = pow(2.0,ST0) - 1.0;
1944
}
1945

    
1946
/* FYL2X: ST1 <- ST1 * log2(ST0), then pop. A non-positive ST0 is out
   of domain: the condition codes are cleared and status bit 0x400 is
   set instead of computing a result. */
void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else { 
        env->fpus &= (~0x4700);  /* clear C3,C2,C1,C0 */
        env->fpus |= 0x400;
    }
}
1960

    
1961
/* FPTAN: replace ST0 with tan(ST0), then push 1.0. If |ST0| exceeds
   MAXTAN the operand is not reduced: C2 is set and the stack is left
   unchanged. */
void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;   /* C2: argument out of range, not computed */
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}
1976

    
1977
void helper_fpatan(void)
1978
{
1979
    CPU86_LDouble fptemp, fpsrcop;
1980

    
1981
    fpsrcop = ST1;
1982
    fptemp = ST0;
1983
    ST1 = atan2(fpsrcop,fptemp);
1984
    fpop();
1985
}
1986

    
1987
/* FXTRACT: decompose ST0 into exponent and significand. After the
   push, ST1 holds the unbiased exponent (as a float) and ST0 holds the
   significand with its exponent rebiased to zero. */
void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    /* unbiased exponent of the source */
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    /* force the exponent field of the significand copy to bias (2^0) */
    BIASEXPONENT(temp);
    ST0 = temp.d;
}
2000

    
2001
void helper_fprem1(void)
2002
{
2003
    CPU86_LDouble dblq, fpsrcop, fptemp;
2004
    CPU86_LDoubleU fpsrcop1, fptemp1;
2005
    int expdif;
2006
    int q;
2007

    
2008
    fpsrcop = ST0;
2009
    fptemp = ST1;
2010
    fpsrcop1.d = fpsrcop;
2011
    fptemp1.d = fptemp;
2012
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
2013
    if (expdif < 53) {
2014
        dblq = fpsrcop / fptemp;
2015
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
2016
        ST0 = fpsrcop - fptemp*dblq;
2017
        q = (int)dblq; /* cutting off top bits is assumed here */
2018
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
2019
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
2020
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
2021
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
2022
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
2023
    } else {
2024
        env->fpus |= 0x400;  /* C2 <-- 1 */
2025
        fptemp = pow(2.0, expdif-50);
2026
        fpsrcop = (ST0 / ST1) / fptemp;
2027
        /* fpsrcop = integer obtained by rounding to the nearest */
2028
        fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
2029
            floor(fpsrcop): ceil(fpsrcop);
2030
        ST0 -= (ST1 * fpsrcop * fptemp);
2031
    }
2032
}
2033

    
2034
void helper_fprem(void)
2035
{
2036
    CPU86_LDouble dblq, fpsrcop, fptemp;
2037
    CPU86_LDoubleU fpsrcop1, fptemp1;
2038
    int expdif;
2039
    int q;
2040
    
2041
    fpsrcop = ST0;
2042
    fptemp = ST1;
2043
    fpsrcop1.d = fpsrcop;
2044
    fptemp1.d = fptemp;
2045
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
2046
    if ( expdif < 53 ) {
2047
        dblq = fpsrcop / fptemp;
2048
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
2049
        ST0 = fpsrcop - fptemp*dblq;
2050
        q = (int)dblq; /* cutting off top bits is assumed here */
2051
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
2052
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
2053
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
2054
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
2055
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
2056
    } else {
2057
        env->fpus |= 0x400;  /* C2 <-- 1 */
2058
        fptemp = pow(2.0, expdif-50);
2059
        fpsrcop = (ST0 / ST1) / fptemp;
2060
        /* fpsrcop = integer obtained by chopping */
2061
        fpsrcop = (fpsrcop < 0.0)?
2062
            -(floor(fabs(fpsrcop))): floor(fpsrcop);
2063
        ST0 -= (ST1 * fpsrcop * fptemp);
2064
    }
2065
}
2066

    
2067
/* FYL2XP1: ST1 <- ST1 * log2(ST0 + 1), then pop. If ST0 + 1 is not
   positive the operation is out of domain: condition codes are cleared
   and status bit 0x400 is set instead of computing. */
void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else { 
        env->fpus &= (~0x4700);  /* clear C3,C2,C1,C0 */
        env->fpus |= 0x400;
    }
}
2081

    
2082
/* FSQRT: ST0 <- sqrt(ST0). A negative operand clears the condition
   codes and sets status bit 0x400; sqrt() is still evaluated and
   yields a NaN for negative input. */
void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) { 
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}
2093

    
2094
/* FSINCOS: replace ST0 with sin(ST0) and push cos(ST0). If |ST0|
   exceeds MAXTAN the operand is not reduced: C2 is set and the stack
   is left unchanged. */
void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;   /* C2: argument out of range, not computed */
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}
2109

    
2110
/* FRNDINT: round ST0 to an integer value. On ARM hosts the FPU
   rounding-control bits select one of the directed-rounding asm
   instructions; elsewhere rint() is used, which follows the host's
   current rounding mode rather than the guest's RC field.
   NOTE(review): the non-ARM path therefore ignores env->fpuc RC bits
   unless the host mode is kept in sync — confirm against callers. */
void helper_frndint(void)
{
    CPU86_LDouble a;

    a = ST0;
#ifdef __arm__
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_DOWN:
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_UP:
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_CHOP:
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));
        break;
    }
#else
    a = rint(a);
#endif
    ST0 = a;
}
2136

    
2137
void helper_fscale(void)
2138
{
2139
    CPU86_LDouble fpsrcop, fptemp;
2140

    
2141
    fpsrcop = 2.0;
2142
    fptemp = pow(fpsrcop,ST1);
2143
    ST0 *= fptemp;
2144
}
2145

    
2146
/* FSIN: ST0 <- sin(ST0) when |ST0| is within the reducible range;
   otherwise set C2 and leave ST0 unchanged. */
void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;   /* C2: argument out of range, not computed */
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}
2159

    
2160
/* FCOS: ST0 <- cos(ST0) when |ST0| is within the reducible range;
   otherwise set C2 and leave ST0 unchanged. */
void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;   /* C2: argument out of range, not computed */
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}
2173

    
2174
/* FXAM: classify ST0 into the condition-code bits. C1 receives the
   sign; the remaining bits encode NaN / infinity / zero / denormal /
   normal. NOTE(review): env->fptags is not consulted, so the "empty
   register" class of FXAM is never reported — confirm intended. */
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        /* maximum exponent: infinity or NaN depending on the mantissa */
        if (MANTD(temp) == 0)
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        /* zero exponent: true zero or denormal */
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        /* ordinary normalized number */
        env->fpus |= 0x400;
    }
}
2200

    
2201
/* FSTENV/FNSTENV core: store the FPU environment at ptr — control
   word, status word with TOP merged in, and a tag word rebuilt by
   inspecting each register's contents. Layout is 32-bit when data32
   is non-zero, 16-bit otherwise; instruction/operand pointer slots
   are stored as zero (not tracked here). */
void helper_fstenv(uint8_t *ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    /* merge the stack TOP field into the status-word image */
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    /* rebuild the 2-bits-per-register tag word, register 7 first */
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;  /* empty */
        } else {
            tmp.d = env->fpregs[i];
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0);  /* instruction/operand pointers: not tracked */
        stl(ptr + 16, 0);
        stl(ptr + 20, 0);
        stl(ptr + 24, 0);
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);   /* instruction/operand pointers: not tracked */
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}
2250

    
2251
void helper_fldenv(uint8_t *ptr, int data32)
2252
{
2253
    int i, fpus, fptag;
2254

    
2255
    if (data32) {
2256
        env->fpuc = lduw(ptr);
2257
        fpus = lduw(ptr + 4);
2258
        fptag = lduw(ptr + 8);
2259
    }
2260
    else {
2261
        env->fpuc = lduw(ptr);
2262
        fpus = lduw(ptr + 2);
2263
        fptag = lduw(ptr + 4);
2264
    }
2265
    env->fpstt = (fpus >> 11) & 7;
2266
    env->fpus = fpus & ~0x3800;
2267
    for(i = 0;i < 7; i++) {
2268
        env->fptags[i] = ((fptag & 3) == 3);
2269
        fptag >>= 2;
2270
    }
2271
}
2272

    
2273
/* FSAVE: store the full FPU state at ptr (environment followed by all
   8 data registers, 10 bytes each), then reinitialize the FPU exactly
   as FNINIT would. */
void helper_fsave(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    /* data registers follow the 14-byte (16-bit) or 28-byte (32-bit)
       environment block */
    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;  /* mark every register empty */
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}
2300

    
2301
void helper_frstor(uint8_t *ptr, int data32)
2302
{
2303
    CPU86_LDouble tmp;
2304
    int i;
2305

    
2306
    helper_fldenv(ptr, data32);
2307
    ptr += (14 << data32);
2308

    
2309
    for(i = 0;i < 8; i++) {
2310
        tmp = helper_fldt(ptr);
2311
        ST(i) = tmp;
2312
        ptr += 10;
2313
    }
2314
}
2315

    
2316
#if !defined(CONFIG_USER_ONLY) 
2317

    
2318
#define MMUSUFFIX _mmu
2319
#define GETPC() (__builtin_return_address(0))
2320

    
2321
#define SHIFT 0
2322
#include "softmmu_template.h"
2323

    
2324
#define SHIFT 1
2325
#include "softmmu_template.h"
2326

    
2327
#define SHIFT 2
2328
#include "softmmu_template.h"
2329

    
2330
#define SHIFT 3
2331
#include "softmmu_template.h"
2332

    
2333
#endif
2334

    
2335
/* try to fill the TLB and return an exception if error. If retaddr is
2336
   NULL, it means that the function was called in C code (i.e. not
2337
   from generated code or from helper.c) */
2338
/* XXX: fix it to restore all registers */
2339
/* Handle a softmmu TLB miss for addr. Attempts to resolve the guest
   mapping via cpu_x86_handle_mmu_fault(); on failure it restores the
   translation state (if called from generated code) and raises a
   guest page fault, which does not return. */
void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    if (is_write && page_unprotect(addr)) {
        /* nothing more to do: the page was write protected because
           there was code in it. page_unprotect() flushed the code. */
    }

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        /* longjmps back into the guest exception path; never returns */
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    }
    env = saved_env;
}