/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"

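/* parity_table[b] is CC_P when byte b contains an even number of set
   bits; it is used to compute the x86 PF flag after ALU operations. */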
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
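/* RCL rotates through the carry flag, so a 16-bit rotate has period 17
   and an 8-bit rotate period 9; these tables reduce the rotate count
   accordingly. */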
/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

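/* constants loaded by the x87 fldz, fld1, fldpi, fldlg2, fldln2,
   fldl2e and fldl2t instructions, in that order */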
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
#ifdef reg_EAX
    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_ECX
    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_EBX
    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ESP
    env->regs[R_ESP] = ESP;
#endif
#ifdef reg_EBP
    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESI
    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
    env->regs[R_EDI] = EDI;
#endif
    longjmp(env->jmp_env, 1);
}

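/* Fetch the stack pointer for privilege level dpl from the current
   TSS: in a 32-bit TSS the ESPn/SSn pairs start at offset 4 and take
   8 bytes each, in a 16-bit TSS they start at offset 2 and take 4. */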
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* return non-zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

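/* Descriptor words e1/e2: e1 holds limit bits 15..0 (low word) and
   base bits 15..0 (high word); e2 holds base 23..16 in its low byte,
   the type/flags and limit 19..16 in the middle, and base 31..24 in
   the top byte. With the G bit set the 20-bit limit is in 4K pages. */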
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
{
    return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (uint8_t *)(selector << 4), 0xffff, 0);
}

/* protected mode interrupt */
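/* Dispatch via the IDT: read and validate the gate descriptor and the
   target code segment, switch to the inner-privilege stack from the
   TSS when needed, push the return frame (and error code), then enter
   the handler. */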
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
    uint32_t old_cs, old_ss, old_esp, old_eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        cpu_abort(env, "task gate not supported");
        break;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        new_stack = 0;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
    }

    shift = type >> 3;
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
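    /* push_size counts bytes assuming 16-bit pushes: EIP, CS and
       EFLAGS (6), plus SS:ESP on a stack switch (4), an optional error
       code (2) and the four vm86 segment selectors (8); the final
       shift doubles everything for 32-bit gates. */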
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;

    /* XXX: check that enough room is available */
    if (new_stack) {
        old_esp = ESP;
        old_ss = env->segs[R_SS].selector;
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                       get_seg_base(ss_e1, ss_e2),
                       get_seg_limit(ss_e1, ss_e2),
                       ss_e2);
    } else {
        old_esp = 0;
        old_ss = 0;
        esp = ESP;
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;
    ESP = esp - push_size;
    ssp = env->segs[R_SS].base + esp;
    if (shift == 1) {
        int old_eflags;
        if (env->eflags & VM_MASK) {
            ssp -= 4;
            stl_kernel(ssp, env->segs[R_GS].selector);
            ssp -= 4;
            stl_kernel(ssp, env->segs[R_FS].selector);
            ssp -= 4;
            stl_kernel(ssp, env->segs[R_DS].selector);
            ssp -= 4;
            stl_kernel(ssp, env->segs[R_ES].selector);
        }
        if (new_stack) {
            ssp -= 4;
            stl_kernel(ssp, old_ss);
            ssp -= 4;
            stl_kernel(ssp, old_esp);
        }
        ssp -= 4;
        old_eflags = compute_eflags();
        stl_kernel(ssp, old_eflags);
        ssp -= 4;
        stl_kernel(ssp, old_cs);
        ssp -= 4;
        stl_kernel(ssp, old_eip);
        if (has_error_code) {
            ssp -= 4;
            stl_kernel(ssp, error_code);
        }
    } else {
        if (new_stack) {
            ssp -= 2;
            stw_kernel(ssp, old_ss);
            ssp -= 2;
            stw_kernel(ssp, old_esp);
        }
        ssp -= 2;
        stw_kernel(ssp, compute_eflags());
        ssp -= 2;
        stw_kernel(ssp, old_cs);
        ssp -= 2;
        stw_kernel(ssp, old_eip);
        if (has_error_code) {
            ssp -= 2;
            stw_kernel(ssp, error_code);
        }
    }

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                                 unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    esp -= 2;
    stw_kernel(ssp + (esp & 0xffff), compute_eflags());
    esp -= 2;
    stw_kernel(ssp + (esp & 0xffff), old_cs);
    esp -= 2;
    stw_kernel(ssp + (esp & 0xffff), old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (uint8_t *)(selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  unsigned int next_eip, int is_hw)
{
    if (env->cr[0] & CR0_PE_MASK) {
        do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     unsigned int next_eip)
{
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = next_eip;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */
void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

void helper_divl_EAX_T0(uint32_t eip)
{
    unsigned int den, q, r;
    uint64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}

void helper_idivl_EAX_T0(uint32_t eip)
{
    int den, q, r;
    int64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}

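/* CMPXCHG8B: compare EDX:EAX with the 64-bit operand at A0; if equal,
   store ECX:EBX there and set ZF, otherwise load the operand into
   EDX:EAX and clear ZF. */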
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq((uint8_t *)A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

/* We simulate a pre-MMX pentium as in valgrind */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
/* ... */
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)

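/* CPUID leaf 0 returns the vendor string "GenuineIntel" in EBX, EDX,
   ECX; leaf 1 returns the family/model/stepping word and the feature
   flags defined above. */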
void helper_cpuid(void)
{
    if (EAX == 0) {
        EAX = 1; /* max EAX index supported */
        EBX = 0x756e6547;
        ECX = 0x6c65746e;
        EDX = 0x49656e69;
    } else if (EAX == 1) {
        int family, model, stepping;
        /* EAX = 1 info */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 1;
        stepping = 3;
#endif
        EAX = (family << 8) | (model << 4) | stepping;
        EBX = 0;
        ECX = 0;
        EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
            CPUID_TSC | CPUID_MSR | CPUID_MCE |
            CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
    }
}

void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = NULL;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }
    env->ldt.selector = selector;
}

void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = NULL;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 2 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->tr, e1, e2);
        e2 |= 0x00000200; /* set the busy bit */
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. Calling load_seg with
   seg_reg == R_CS is discouraged */
void load_seg(int seg_reg, int selector, unsigned int cur_eip)
{
    uint32_t e1, e2;

    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, 0);
        } else {
            cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
        }
    } else {
        if (load_segment(&e1, &e2, selector) != 0) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK) ||
            (e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }

        if (seg_reg == R_SS) {
            if ((e2 & (DESC_CS_MASK | DESC_W_MASK)) == 0) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        } else {
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            EIP = cur_eip;
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected_T0_T1(void)
{
    int new_cs, new_eip;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x",
                  new_cs, new_eip);
    }
}

/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    uint8_t *ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = 0xffffffff;
    if (!(env->segs[R_SS].flags & DESC_B_MASK))
        esp_mask = 0xffff;
    ssp = env->segs[R_SS].base;
    if (shift) {
        esp -= 4;
        stl_kernel(ssp + (esp & esp_mask), env->segs[R_CS].selector);
        esp -= 4;
        stl_kernel(ssp + (esp & esp_mask), next_eip);
    } else {
        esp -= 2;
        stw_kernel(ssp + (esp & esp_mask), env->segs[R_CS].selector);
        esp -= 2;
        stw_kernel(ssp + (esp & esp_mask), next_eip);
    }

    if (!(env->segs[R_SS].flags & DESC_B_MASK))
        ESP = (ESP & ~0xffff) | (esp & 0xffff);
    else
        ESP = esp;
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
}

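/* A far call either targets a code segment directly or goes through a
   call gate, which supplies the new CS:EIP. A gated call to a more
   privileged segment also switches to the stack given by the TSS and
   copies param_count parameter words from the old stack. */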
/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, push_size, sp, type, ss_dpl;
    uint32_t old_ss, old_esp, val, i, limit;
    uint8_t *ssp, *old_ssp;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

        sp = ESP;
        if (!(env->segs[R_SS].flags & DESC_B_MASK))
            sp &= 0xffff;
        ssp = env->segs[R_SS].base + sp;
        if (shift) {
            ssp -= 4;
            stl_kernel(ssp, env->segs[R_CS].selector);
            ssp -= 4;
            stl_kernel(ssp, next_eip);
        } else {
            ssp -= 2;
            stw_kernel(ssp, env->segs[R_CS].selector);
            ssp -= 2;
            stw_kernel(ssp, next_eip);
        }
        sp -= (4 << shift);

        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* from this point, not restartable */
        if (!(env->segs[R_SS].flags & DESC_B_MASK))
            ESP = (ESP & 0xffff0000) | (sp & 0xffff);
        else
            ESP = sp;
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            cpu_abort(env, "task gate not supported");
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            param_count = e2 & 0x1f;
            push_size = ((param_count * 2) + 8) << shift;

            old_esp = ESP;
            old_ss = env->segs[R_SS].selector;
            if (!(env->segs[R_SS].flags & DESC_B_MASK))
                old_esp &= 0xffff;
            old_ssp = env->segs[R_SS].base + old_esp;

            /* XXX: from this point not restartable */
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                           get_seg_base(ss_e1, ss_e2),
                           get_seg_limit(ss_e1, ss_e2),
                           ss_e2);

            if (!(env->segs[R_SS].flags & DESC_B_MASK))
                sp &= 0xffff;
            ssp = env->segs[R_SS].base + sp;
            if (shift) {
                ssp -= 4;
                stl_kernel(ssp, old_ss);
                ssp -= 4;
                stl_kernel(ssp, old_esp);
                ssp -= 4 * param_count;
                for(i = 0; i < param_count; i++) {
                    val = ldl_kernel(old_ssp + i * 4);
                    stl_kernel(ssp + i * 4, val);
                }
            } else {
                ssp -= 2;
                stw_kernel(ssp, old_ss);
                ssp -= 2;
                stw_kernel(ssp, old_esp);
                ssp -= 2 * param_count;
                for(i = 0; i < param_count; i++) {
                    val = lduw_kernel(old_ssp + i * 2);
                    stw_kernel(ssp + i * 2, val);
                }
            }
        } else {
            /* to same privilege */
            if (!(env->segs[R_SS].flags & DESC_B_MASK))
                sp &= 0xffff;
            ssp = env->segs[R_SS].base + sp;
            push_size = (4 << shift);
        }

        if (shift) {
            ssp -= 4;
            stl_kernel(ssp, env->segs[R_CS].selector);
            ssp -= 4;
            stl_kernel(ssp, next_eip);
        } else {
            ssp -= 2;
            stw_kernel(ssp, env->segs[R_CS].selector);
            ssp -= 2;
            stw_kernel(ssp, next_eip);
        }

        sp -= push_size;
        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);

        /* from this point, not restartable if same privilege */
        if (!(env->segs[R_SS].flags & DESC_B_MASK))
            ESP = (ESP & 0xffff0000) | (sp & 0xffff);
        else
            ESP = sp;
        EIP = offset;
    }
}

/* real mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp;
    uint8_t *ssp;
    int eflags_mask;

    sp = ESP & 0xffff;
    ssp = env->segs[R_SS].base + sp;
    if (shift == 1) {
        /* 32 bits */
        new_eflags = ldl_kernel(ssp + 8);
        new_cs = ldl_kernel(ssp + 4) & 0xffff;
        new_eip = ldl_kernel(ssp) & 0xffff;
    } else {
        /* 16 bits */
        new_eflags = lduw_kernel(ssp + 4);
        new_cs = lduw_kernel(ssp + 2);
        new_eip = lduw_kernel(ssp);
    }
    new_esp = sp + (6 << shift);
    ESP = (ESP & 0xffff0000) |
        (new_esp & 0xffff);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    eflags_mask = FL_UPDATE_CPL0_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}

/* protected mode iret */
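/* Common code for IRET and LRET: pop EIP and CS (plus EFLAGS for
   IRET); on a return to an outer privilege level, also pop and
   validate the new SS:ESP. 'addend' accounts for LRET's immediate
   stack adjustment. An IRET whose saved EFLAGS has VM set returns to
   vm86 mode instead. */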
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask;
    uint8_t *ssp;

    sp = ESP;
    if (!(env->segs[R_SS].flags & DESC_B_MASK))
        sp &= 0xffff;
    ssp = env->segs[R_SS].base + sp;
    if (shift == 1) {
        /* 32 bits */
        if (is_iret)
            new_eflags = ldl_kernel(ssp + 8);
        new_cs = ldl_kernel(ssp + 4) & 0xffff;
        new_eip = ldl_kernel(ssp);
        if (is_iret && (new_eflags & VM_MASK))
            goto return_to_vm86;
    } else {
        /* 16 bits */
        if (is_iret)
            new_eflags = lduw_kernel(ssp + 4);
        new_cs = lduw_kernel(ssp + 2);
        new_eip = lduw_kernel(ssp);
    }
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    if (rpl == cpl) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        new_esp = sp + (4 << shift) + ((2 * is_iret) << shift) + addend;
    } else {
        /* return to different privilege level */
        ssp += (4 << shift) + ((2 * is_iret) << shift) + addend;
        if (shift == 1) {
            /* 32 bits */
            new_esp = ldl_kernel(ssp);
            new_ss = ldl_kernel(ssp + 4) & 0xffff;
        } else {
            /* 16 bits */
            new_esp = lduw_kernel(ssp);
            new_ss = lduw_kernel(ssp + 2);
        }

        if ((new_ss & 3) != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_load_seg_cache(env, R_SS, new_ss,
                       get_seg_base(ss_e1, ss_e2),
                       get_seg_limit(ss_e1, ss_e2),
                       ss_e2);
        cpu_x86_set_cpl(env, rpl);
    }
    if (env->segs[R_SS].flags & DESC_B_MASK)
        ESP = new_esp;
    else
        ESP = (ESP & 0xffff0000) |
            (new_esp & 0xffff);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' can be different from the current CPL */
        if (cpl == 0)
            eflags_mask = FL_UPDATE_CPL0_MASK;
        else
            eflags_mask = FL_UPDATE_MASK32;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    new_esp = ldl_kernel(ssp + 12);
    new_ss = ldl_kernel(ssp + 16);
    new_es = ldl_kernel(ssp + 20);
    new_ds = ldl_kernel(ssp + 24);
    new_fs = ldl_kernel(ssp + 28);
    new_gs = ldl_kernel(ssp + 32);

    /* modify processor state */
    load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss);
    load_seg_vm(R_ES, new_es);
    load_seg_vm(R_DS, new_ds);
    load_seg_vm(R_FS, new_fs);
    load_seg_vm(R_GS, new_gs);

    env->eip = new_eip;
    ESP = new_esp;
}

void helper_iret_protected(int shift)
{
    helper_ret_protected(shift, 1, 0);
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}

void helper_movl_crN_T0(int reg)
{
    env->cr[reg] = T0;
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env);
        break;
    case 3:
        cpu_x86_update_cr3(env);
        break;
    }
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(unsigned int addr)
{
    cpu_x86_flush_tlb(env, addr);
}

/* rdtsc */
#ifndef __i386__
uint64_t emu_time;
#endif

void helper_rdtsc(void)
{
    uint64_t val;
#ifdef __i386__
    asm("rdtsc" : "=A" (val));
#else
    /* better than nothing: the time increases */
    val = emu_time++;
#endif
    EAX = val;
    EDX = val >> 32;
}

void helper_wrmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = EAX & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = EAX;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = EAX;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        EAX = env->sysenter_cs;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_ESP:
        EAX = env->sysenter_esp;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_EIP:
        EAX = env->sysenter_eip;
        EDX = 0;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    T1 = limit;
    CC_SRC |= CC_Z;
}

void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    T1 = e2 & 0x00f0ff00;
    CC_SRC |= CC_Z;
}

/* FPU helpers */

void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, (uint8_t *)A0);
}

/* BCD ops */

#define MUL10(iv) ( iv + iv + (iv << 3) )

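/* x87 packed BCD (fbld/fbst): bytes 0..8 hold 18 decimal digits, two
   per byte with the low nibble least significant; bit 7 of byte 9 is
   the sign bit. */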
void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub((uint8_t *)A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub((uint8_t *)A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    CPU86_LDouble tmp;
    int v;
    uint8_t *mem_ref, *mem_end;
    int64_t val;

    tmp = rint(ST0);
    val = (int64_t)tmp;
    mem_ref = (uint8_t *)A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
            floor(fpsrcop): ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if ( expdif < 53 ) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0)?
            -(floor(fabs(fpsrcop))): floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    CPU86_LDouble a;

    a = ST0;
#ifdef __arm__
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_DOWN:
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_UP:
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_CHOP:
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));
        break;
    }
#else
    a = rint(a);
#endif
    ST0 = a;
}

void helper_fscale(void)
{
    CPU86_LDouble fpsrcop, fptemp;

    fpsrcop = 2.0;
    fptemp = pow(fpsrcop,ST1);
    ST0 *= fptemp;
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

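/* fstenv/fldenv image: the tag word stores 2 bits per register:
   00 = valid, 01 = zero, 10 = special (NaN, infinity, denormal),
   11 = empty. */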
void helper_fstenv(uint8_t *ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i];
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0);
        stl(ptr + 16, 0);
        stl(ptr + 20, 0);
        stl(ptr + 24, 0);
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(uint8_t *ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    /* restore the tags of all 8 registers */
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

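/* Instantiate the softmmu load/store helpers for 1-, 2-, 4- and
   8-byte accesses (SHIFT is the log2 of the access size). */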
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    if (is_write && page_unprotect(addr)) {
        /* nothing more to do: the page was write protected because
           there was code in it. page_unprotect() flushed the code. */
    }

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    }
    env = saved_env;
}