/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    printf("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
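
/* parity_table[v] is CC_P exactly when byte v has an even number of set
   bits (the x86 PF flag is even parity of the low 8 result bits).  For
   reference only, the table is equivalent to the sketch below;
   check_parity() is a hypothetical helper, not part of this file. */
#if 0
static int check_parity(int v)
{
    int i, bits;
    bits = 0;
    for(i = 0; i < 8; i++)
        bits += (v >> i) & 1;
    return (bits & 1) == 0 ? CC_P : 0;
}
#endif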

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
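
/* These tables reduce an RCL/RCR rotate count: with the carry flag
   included, a 16 bit rotate through carry has period 17 and an 8 bit
   one period 9, so a count already masked to 0..31 by the CPU is
   folded to count % 17 (rclw_table) or count % 9 (rclb_table). */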

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
#ifdef reg_EAX
    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_ECX
    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_EBX
    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ESP
    env->regs[R_ESP] = ESP;
#endif
#ifdef reg_EBP
    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESI
    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
    env->regs[R_EDI] = EDI;
#endif
    longjmp(env->jmp_env, 1);
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
{
    return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
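
/* Descriptor words as used above: e1 is bytes 0-3 of the descriptor
   (limit 15..0, base 15..0) and e2 is bytes 4-7 (base 23..16, the
   type/DPL/P byte, limit 19..16, the G/D flags, base 31..24).  As a
   worked example, e1=0x0000ffff with e2=0x00cf9a00 decodes to base 0
   and limit 0xfffff with G set, i.e. 0xffffffff after the 4 KB
   scaling, a present ring 0 code segment. */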

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (uint8_t *)(selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
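
/* Index computation above: for a 32 bit TSS (shift == 1) the inner
   stack pointers live at (dpl * 8) + 4, i.e. ESP0 at offset 0x04 and
   SS0 at 0x08, ESP1/SS1 at 0x0c/0x10, and so on; for a 16 bit TSS
   they are word sized at (dpl * 4) + 2, i.e. SP0 at 0x02 and SS0 at
   0x04. */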

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);

        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* must not be non-readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    uint8_t *tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        for(i = 0; i < 8; i++)
            stl_kernel(env->tr.base + (0x28 + i * 4), env->regs[i]);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        for(i = 0; i < 8; i++)
            stw_kernel(env->tr.base + (0x12 + i * 2), env->regs[i]);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    for(i = 0; i < 8; i++)
        env->regs[i] = new_regs[i];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first load just the selectors, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], NULL, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = NULL;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
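
/* For reference, the 32 bit TSS offsets used by switch_tss() follow
   the Intel layout: 0x1c CR3, 0x20 EIP, 0x24 EFLAGS, 0x28-0x44 the
   eight general registers, 0x48-0x5c the six segment selectors, 0x60
   the LDT selector and 0x64 the T bit word, with the I/O map base at
   0x66 (read by check_io() below). */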

/* check if port I/O is allowed by the TSS I/O bitmap */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
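
/* Worked example: for a word access (size == 2) to port 0x3f9, the
   code reads the bitmap word containing byte io_offset + (0x3f9 >> 3)
   and requires the two bits at positions (0x3f9 & 7) = 1 and 2 to be
   clear; any set bit in that range raises #GP(0). */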

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
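
/* These macros update the local sp copy in place and mask every access
   with sp_mask, so a 16 bit stack (B bit clear in the SS descriptor)
   wraps inside the low 64 KB. Callers keep the untouched high bits of
   ESP and merge the result back, e.g.:
       sp = ESP;
       PUSHW(ssp, sp, sp_mask, val);
       ESP = (ESP & ~sp_mask) | (sp & sp_mask);
*/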

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int type, dpl, selector, ss_dpl, cpl, sp_mask;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int mask;
            /* push the error code */
            shift = (env->segs[R_CS].flags >> DESC_B_SHIFT) & 1;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            env->regs[R_ESP] = (esp & mask) | (env->regs[R_ESP] & ~mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = NULL; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            /* XXX: explain why W2K hangs if the whole segment cache is
               reset */
#if 1
            env->segs[R_ES].selector = 0;
            env->segs[R_ES].flags = 0;
            env->segs[R_DS].selector = 0;
            env->segs[R_DS].flags = 0;
            env->segs[R_FS].selector = 0;
            env->segs[R_FS].flags = 0;
            env->segs[R_GS].selector = 0;
            env->segs[R_GS].flags = 0;
#else
            cpu_x86_load_seg_cache(env, R_ES, 0, NULL, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, NULL, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, NULL, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, NULL, 0, 0);
#endif
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    ESP = (ESP & ~sp_mask) | (esp & sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (uint8_t *)(selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
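
/* Real mode IVT entries are 4 bytes, a 16 bit offset followed by a
   16 bit segment, hence the intno * 4 indexing above (the protected
   mode IDT uses 8 byte gates, hence intno * 8 elsewhere). */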

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  unsigned int next_eip, int is_hw)
{
#ifdef DEBUG_PCALL
    if (loglevel & (CPU_LOG_PCALL | CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:%08x SP=%04x:%08x",
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=%08x", env->cr[2]);
            } else {
                fprintf(logfile, " EAX=%08x", env->regs[R_EAX]);
            }
            fprintf(logfile, "\n");
#if 0
            cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
#endif
    if (env->cr[0] & CR0_PE_MASK) {
        do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     unsigned int next_eip)
{
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = next_eip;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

void helper_divl_EAX_T0(uint32_t eip)
{
    unsigned int den, q, r;
    uint64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}

void helper_idivl_EAX_T0(uint32_t eip)
{
    int den, q, r;
    int64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}
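
/* Note: real hardware also raises #DE when the quotient does not fit
   in 32 bits (e.g. EDX:EAX = 1:0 divided by 1 yields a quotient of
   2^32); the helpers above only check for a zero divisor and silently
   truncate such quotients. */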

void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq((uint8_t *)A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
/* ... */
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)

void helper_cpuid(void)
{
    switch(EAX) {
    case 0:
        EAX = 2; /* max EAX index supported */
        EBX = 0x756e6547;
        ECX = 0x6c65746e;
        EDX = 0x49656e69;
        break;
    case 1:
        {
            int family, model, stepping;
            /* EAX = 1 info */
#if 0
            /* pentium 75-200 */
            family = 5;
            model = 2;
            stepping = 11;
#else
            /* pentium pro */
            family = 6;
            model = 1;
            stepping = 3;
#endif
            EAX = (family << 8) | (model << 4) | stepping;
            EBX = 0;
            ECX = 0;
            EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
                CPUID_TSC | CPUID_MSR | CPUID_MCE |
                CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
        }
        break;
    default:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 0x410601;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}
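
/* The EAX=0 leaf above returns the vendor string in EBX, EDX, ECX
   order: 0x756e6547, 0x49656e69, 0x6c65746e read as little endian
   bytes are "Genu", "ineI", "ntel", i.e. "GenuineIntel". */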

void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = NULL;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }
    env->ldt.selector = selector;
}

void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = NULL;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->tr, e1, e2);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works in protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS)
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
    } else {
        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_SS) {
            /* must be a writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be a readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected_T0_T1(void)
{
    int new_cs, new_eip, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* XXX: check if it is really the current EIP */
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, env->eip);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    uint8_t *ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    ESP = (ESP & ~esp_mask) | (esp & esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    uint8_t *ssp, *old_ssp;

    new_cs = T0;
    new_eip = T1;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, new_eip, shift);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

        sp = ESP;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* from this point, not restartable */
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=%x\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        EIP = offset;
    }
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    uint8_t *ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non-conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, NULL, 0, 0);
        }
    }
}
1577

    
1578
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss, sp_mask;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    uint8_t *ssp;

    sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:%08x s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs, 
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:%08x\n",
                    new_ss, new_esp);
        }
#endif

        if ((new_ss & 3) != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);

        cpu_x86_load_seg_cache(env, R_CS, new_cs, 
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_load_seg_cache(env, R_SS, new_ss, 
                       get_seg_base(ss_e1, ss_e2),
                       get_seg_limit(ss_e1, ss_e2),
                       ss_e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
        sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, cpl);
        validate_seg(R_DS, cpl);
        validate_seg(R_FS, cpl);
        validate_seg(R_GS, cpl);

        sp += addend;
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK | 
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip;
    ESP = new_esp;
}

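/* An IRET with EFLAGS.NT set is a nested task return: it goes through
   the back link selector stored at offset 0 of the current TSS instead
   of popping a stack frame. */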
void helper_iret_protected(int shift)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        /* XXX: check if it is really the current EIP */
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, env->eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}

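/* writes to CR0/CR3/CR4 must go through the cpu_x86_update_* functions
   so that mode switches and TLB flushes take effect; the remaining
   control registers are simply stored */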
void helper_movl_crN_T0(int reg)
{
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, T0);
        break;
    case 3:
        cpu_x86_update_cr3(env, T0);
        break;
    case 4:
        cpu_x86_update_cr4(env, T0);
        break;
    default:
        env->cr[reg] = T0;
        break;
    }
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(unsigned int addr)
{
    cpu_x86_flush_tlb(env, addr);
}

/* rdtsc */
#if !defined(__i386__) && !defined(__x86_64__)
uint64_t emu_time;
#endif

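/* RDTSC returns a 64-bit counter split across EDX:EAX (high half in
   EDX, low half in EAX) */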
void helper_rdtsc(void)
{
    uint64_t val;
#if defined(__i386__) || defined(__x86_64__)
    asm("rdtsc" : "=A" (val));
#else
    /* better than nothing: the time increases */
    val = emu_time++;
#endif
    EAX = val;
    EDX = val >> 32;
}

void helper_wrmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = EAX & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = EAX;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = EAX;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        EAX = env->sysenter_cs;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_ESP:
        EAX = env->sysenter_esp;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_EIP:
        EAX = env->sysenter_eip;
        EDX = 0;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

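/* LSL/LAR/VERR/VERW report success through ZF: each helper below clears
   CC_Z up front and sets it again only when the selector passes all the
   descriptor checks */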
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC |= CC_Z;
}

void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC |= CC_Z;
}

void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            return;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
    }
    CC_SRC |= CC_Z;
}

void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        return;
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
        if (!(e2 & DESC_W_MASK))
            return;
    }
    CC_SRC |= CC_Z;
}

/* FPU helpers */

void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, (uint8_t *)A0);
}

/* BCD ops */

#define MUL10(iv) ( iv + iv + (iv << 3) )

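/* FBLD/FBST use the 80-bit packed BCD format: bytes 0..8 hold 18
   decimal digits, two per byte with the low-order digit in the low
   nibble, and bit 7 of byte 9 holds the sign */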
void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub((uint8_t *)A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub((uint8_t *)A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    CPU86_LDouble tmp;
    int v;
    uint8_t *mem_ref, *mem_end;
    int64_t val;

    tmp = rint(ST0);
    val = (int64_t)tmp;
    mem_ref = (uint8_t *)A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
            floor(fpsrcop): ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

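/* helper_fprem differs from helper_fprem1 above only in the
   large-exponent path: the partial quotient is chopped toward zero here
   instead of being rounded to the nearest integer */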
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if ( expdif < 53 ) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0)?
            -(floor(fabs(fpsrcop))): floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    CPU86_LDouble a;

    a = ST0;
#ifdef __arm__
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_DOWN:
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_UP:
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_CHOP:
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));
        break;
    }
#else
    a = rint(a);
#endif
    ST0 = a;
}

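/* FSCALE: scale ST0 by 2^ST1; note that ST1 is fed to pow() as-is,
   without the truncation toward zero that the hardware applies */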
void helper_fscale(void)
{
    CPU86_LDouble fpsrcop, fptemp;

    fpsrcop = 2.0;
    fptemp = pow(fpsrcop,ST1);
    ST0 *= fptemp;
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

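/* FNSTENV: store the control word, the status word (with TOP merged
   back in) and the packed 2-bit tag word rebuilt from fptags[], using
   the 28-byte 32-bit layout or the 14-byte 16-bit layout */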
void helper_fstenv(uint8_t *ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i];
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(uint8_t *ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

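/* FNSAVE is FNSTENV followed by the eight 80-bit data registers, after
   which the FPU is reset as if by FNINIT */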
void helper_fsave(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

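/* each inclusion of softmmu_template.h below instantiates the softmmu
   load/store helpers for one access size: SHIFT n gives (1 << n) byte
   accesses */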
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    }
    env = saved_env;
}