root / target-i386 / helper.c @ 61a8c4ec
/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

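/* lookup table for the parity flag: parity_table[b] is CC_P when the
   byte b contains an even number of set bits, matching the x86 PF
   definition */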
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
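
/* RCL rotates through the carry flag, so the effective rotate count of
   a 16-bit (resp. 8-bit) RCL is the shift count modulo 17 (resp. 9);
   the two tables below reduce the 5-bit count accordingly */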
/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

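/* x87 constants (zero, one, pi, log10(2), ln(2), log2(e) and log2(10))
   used by the FPU constant-load instructions */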
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand at this point because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

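/* a segment descriptor is handled as two 32-bit words e1 (low) and e2
   (high): the limit is split between e1[15:0] and e2[19:16] and is
   scaled by 4K when the granularity bit is set; the base is scattered
   across e1[31:16], e2[7:0] and e2[31:24] */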
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
{
    return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (uint8_t *)(selector << 4), 0xffff, 0);
}

167
                                       uint32_t *esp_ptr, int dpl)
168
{
169
    int type, index, shift;
170
    
171
#if 0
172
    {
173
        int i;
174
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
175
        for(i=0;i<env->tr.limit;i++) {
176
            printf("%02x ", env->tr.base[i]);
177
            if ((i & 7) == 7) printf("\n");
178
        }
179
        printf("\n");
180
    }
181
#endif
182

    
183
    if (!(env->tr.flags & DESC_P_MASK))
184
        cpu_abort(env, "invalid tss");
185
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
186
    if ((type & 7) != 1)
187
        cpu_abort(env, "invalid tss type");
188
    shift = type >> 3;
189
    index = (dpl * 4 + 2) << shift;
190
    if (index + (4 << shift) - 1 > env->tr.limit)
191
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
192
    if (shift == 0) {
193
        *esp_ptr = lduw_kernel(env->tr.base + index);
194
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
195
    } else {
196
        *esp_ptr = ldl_kernel(env->tr.base + index);
197
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
198
    }
199
}
200

    
201
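/* load a segment register during a task switch; any check failure
   raises #TS with the faulting selector as error code */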
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* code segments must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

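/* perform a hardware task switch: save the current context into the old
   TSS, load the new context from the destination TSS, then update TR,
   CR3, the LDT and the segment registers */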
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    uint8_t *tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], NULL, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = NULL;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

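/* the I/O permission bitmap starts at the 16-bit offset stored at byte
   0x66 of the 32-bit TSS; each port is one bit and every bit covered by
   the access must be clear for the I/O to be allowed */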
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

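/* the stack pointer is always masked with sp_mask so that 16-bit stack
   segments wrap SP at 64K while 32-bit segments use the full ESP */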
/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}

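/* dispatch through the IDT: the entry may be a task gate (hardware task
   switch) or a 286/386 interrupt or trap gate; interrupt gates also
   clear IF on entry */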
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int type, dpl, selector, ss_dpl, cpl, sp_mask;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int mask;
            /* push the error code */
            shift = (env->segs[R_CS].flags >> DESC_B_SHIFT) & 1;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            ESP = (esp & mask) | (ESP & ~mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = NULL; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, NULL, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, NULL, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, NULL, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, NULL, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    ESP = (ESP & ~sp_mask) | (esp & sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (uint8_t *)(selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  unsigned int next_eip, int is_hw)
{
#ifdef DEBUG_PCALL
    if (loglevel & (CPU_LOG_PCALL | CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:%08x pc=%08x SP=%04x:%08x",
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=%08x", env->cr[2]);
            } else {
                fprintf(logfile, " EAX=%08x", EAX);
            }
            fprintf(logfile, "\n");
#if 0
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
#endif
    if (env->cr[0] & CR0_PE_MASK) {
        do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     unsigned int next_eip)
{
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = next_eip;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

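/* unsigned 64-by-32 divide of EDX:EAX by T0; note that a quotient
   overflowing 32 bits is silently truncated here, whereas real hardware
   would raise #DE */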
void helper_divl_EAX_T0(uint32_t eip)
{
    unsigned int den, q, r;
    uint64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}

void helper_idivl_EAX_T0(uint32_t eip)
{
    int den, q, r;
    int64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}

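/* CMPXCHG8B: compare EDX:EAX with the 64-bit operand at A0; if equal,
   store ECX:EBX there and set ZF, otherwise load the operand into
   EDX:EAX and clear ZF */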
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq((uint8_t *)A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
/* ... */
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)

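/* leaf 0 returns the vendor string "GenuineIntel" in EBX:EDX:ECX;
   leaf 1 returns a Pentium Pro family/model/stepping with the feature
   bits defined above */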
void helper_cpuid(void)
{
    switch(EAX) {
    case 0:
        EAX = 2; /* max EAX index supported */
        EBX = 0x756e6547;
        ECX = 0x6c65746e;
        EDX = 0x49656e69;
        break;
    case 1:
        {
            int family, model, stepping;
            /* EAX = 1 info */
#if 0
            /* pentium 75-200 */
            family = 5;
            model = 2;
            stepping = 11;
#else
            /* pentium pro */
            family = 6;
            model = 1;
            stepping = 3;
#endif
            EAX = (family << 8) | (model << 4) | stepping;
            EBX = 0;
            ECX = 0;
            EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
                CPUID_TSC | CPUID_MSR | CPUID_MCE |
                CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
        }
        break;
    default:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 0x410601;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}

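/* helper for ENTER with a non-zero nesting level: copy level-1 frame
   pointers from the old frame onto the new stack, then push T1 (the new
   frame pointer value set up by the translated code) */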
void helper_enter_level(int level, int data32)
{
    uint8_t *ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), T1);
    }
}

void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = NULL;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }
    env->ldt.selector = selector;
}

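/* LTR: load the task register and mark the referenced TSS descriptor
   busy in the GDT */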
void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = NULL;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->tr, e1, e2);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS)
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip)
{
    int new_cs, new_eip, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    uint8_t *ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    ESP = (ESP & ~esp_mask) | (esp & esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    uint8_t *ssp, *old_ssp;

    new_cs = T0;
    new_eip = T1;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

        sp = ESP;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* from this point, not restartable */
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=%x\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        EIP = offset;
    }
}

/* real and vm86 mode iret */
1555
void helper_iret_real(int shift)
1556
{
1557
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
1558
    uint8_t *ssp;
1559
    int eflags_mask;
1560

    
1561
    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
1562
    sp = ESP;
1563
    ssp = env->segs[R_SS].base;
1564
    if (shift == 1) {
1565
        /* 32 bits */
1566
        POPL(ssp, sp, sp_mask, new_eip);
1567
        POPL(ssp, sp, sp_mask, new_cs);
1568
        new_cs &= 0xffff;
1569
        POPL(ssp, sp, sp_mask, new_eflags);
1570
    } else {
1571
        /* 16 bits */
1572
        POPW(ssp, sp, sp_mask, new_eip);
1573
        POPW(ssp, sp, sp_mask, new_cs);
1574
        POPW(ssp, sp, sp_mask, new_eflags);
1575
    }
1576
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
1577
    load_seg_vm(R_CS, new_cs);
1578
    env->eip = new_eip;
1579
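    /* NOTE: in vm86 mode, IOPL is left unchanged: letting iret modify
       it would allow the vm86 task to grant itself I/O privileges */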
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}

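/* check that a data segment register is still valid after a privilege
   level change: segments with DPL < CPL must be nulled, except
   conforming code segments */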
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, NULL, 0, 0);
        }
    }
}

/* protected mode ret/iret (also used by lret) */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss, sp_mask;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    uint8_t *ssp;

    sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:%08x s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:%08x\n",
                    new_ss, new_esp);
        }
#endif

        if ((new_ss & 3) != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_load_seg_cache(env, R_SS, new_ss,
                       get_seg_base(ss_e1, ss_e2),
                       get_seg_limit(ss_e1, ss_e2),
                       ss_e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
        sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, cpl);
        validate_seg(R_DS, cpl);
        validate_seg(R_FS, cpl);
        validate_seg(R_GS, cpl);

        sp += addend;
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}

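/* NOTE: sysenter jumps to CPL 0 with flat 4GB segments: CS comes from
   MSR_IA32_SYSENTER_CS and SS is forced to CS + 8 */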
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           NULL, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           NULL, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

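/* NOTE: sysexit returns to CPL 3 with CS = SYSENTER_CS + 16 and
   SS = SYSENTER_CS + 24; EIP is taken from EDX and ESP from ECX */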
void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           NULL, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           NULL, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
}

void helper_movl_crN_T0(int reg)
{
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, T0);
        break;
    case 3:
        cpu_x86_update_cr3(env, T0);
        break;
    case 4:
        cpu_x86_update_cr4(env, T0);
        break;
    default:
        env->cr[reg] = T0;
        break;
    }
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(unsigned int addr)
{
    cpu_x86_flush_tlb(env, addr);
}

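/* rdtsc returns the 64 bit time stamp counter in EDX:EAX */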
void helper_rdtsc(void)
{
    uint64_t val;

    val = cpu_get_tsc(env);
    EAX = val;
    EDX = val >> 32;
}

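/* MSR access: ECX selects the MSR and EDX:EAX holds the value; only
   the sysenter MSRs are handled here */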
void helper_wrmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = EAX & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = EAX;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = EAX;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        EAX = env->sysenter_cs;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_ESP:
        EAX = env->sysenter_esp;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_EIP:
        EAX = env->sysenter_eip;
        EDX = 0;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

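/* lsl, lar, verr and verw report their result through ZF only: ZF is
   set if the descriptor access check succeeds, cleared otherwise */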
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC |= CC_Z;
}

void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC |= CC_Z;
}

void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            return;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
    }
    CC_SRC |= CC_Z;
}

void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        return;
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
        if (!(e2 & DESC_W_MASK))
            return;
    }
    CC_SRC |= CC_Z;
}

/* FPU helpers */

void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, (uint8_t *)A0);
}

void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

/* BCD ops */

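/* packed BCD operand: bytes 0-8 hold 18 decimal digits, two per byte
   with the low digit in the low nibble; byte 9 holds the sign in
   bit 7 */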
void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub((uint8_t *)A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub((uint8_t *)A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    CPU86_LDouble tmp;
    int v;
    uint8_t *mem_ref, *mem_end;
    int64_t val;

    tmp = rint(ST0);
    val = (int64_t)tmp;
    mem_ref = (uint8_t *)A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS; /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
            floor(fpsrcop): ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if ( expdif < 53 ) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0)?
            -(floor(fabs(fpsrcop))): floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    CPU86_LDouble a;

    a = ST0;
#ifdef __arm__
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_DOWN:
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_UP:
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_CHOP:
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));
        break;
    }
#else
    a = rint(a);
#endif
    ST0 = a;
}

void helper_fscale(void)
{
    CPU86_LDouble fpsrcop, fptemp;

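    /* XXX: on real hardware, ST1 is first rounded toward zero; using
       the raw value is only exact when ST1 is already an integer */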
    fpsrcop = 2.0;
    fptemp = pow(fpsrcop,ST1);
    ST0 *= fptemp;
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x500 /* Infinity */;
        else
            env->fpus |= 0x100 /* NaN */;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /* Zero */;
        else
            env->fpus |= 0x4400 /* Denormal */;
    } else {
        env->fpus |= 0x400;
    }
}

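/* FPU tag word encoding: 00 = valid, 01 = zero, 10 = special (NaN,
   infinity, denormal), 11 = empty */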
void helper_fstenv(uint8_t *ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i];
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(uint8_t *ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

/* XXX: merge with helper_fstt ? */

#ifndef USE_X86LDOUBLE

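/* without USE_X86LDOUBLE, FP registers are stored as 64 bit doubles:
   convert to/from the 80 bit extended format by shifting the 52 bit
   mantissa, making the integer bit explicit and rebiasing the
   exponent (1023 <-> 16383) */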
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

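/* generate the softmmu load/store helpers for 1, 2, 4 and 8 byte
   accesses (the access size is 1 << SHIFT bytes) */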
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(EXCP0E_PAGE, env->error_code);
        else
            raise_exception_err_norestore(EXCP0E_PAGE, env->error_code);
    }
    env = saved_env;
}