/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

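/* table of the x86 PF flag value for each possible low byte of a
   result: CC_P if the byte contains an even number of set bits, 0
   otherwise. Flag computation code can evaluate PF simply as
   parity_table[(uint8_t)val]. */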
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
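
/* RCL rotates through the carry flag, so an N-bit RCL repeats with a
   period of N + 1: the masked 5-bit rotate count is reduced modulo 17
   for 16-bit operands and modulo 9 for 8-bit operands using the two
   tables below. */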
/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
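
/* constants for the FPU constant-load instructions (FLDZ, FLD1, FLDPI,
   FLDLG2, FLDLN2, FLDL2E, FLDL2T); the translator is assumed to pick
   the index matching the decoded opcode. */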
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand at this point because
       longjmp() restores them */
#ifdef reg_EAX
    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_ECX
    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_EBX
    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ESP
    env->regs[R_ESP] = ESP;
#endif
#ifdef reg_EBP
    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESI
    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
    env->regs[R_EDI] = EDI;
#endif
    longjmp(env->jmp_env, 1);
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
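
/* e1/e2 are the two 32-bit words of a segment descriptor: e1 holds
   limit bits 15..0 and base bits 15..0; e2 holds base bits 23..16, the
   type/DPL/P flags, limit bits 19..16, the G/B/AVL flags and base bits
   31..24. */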

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
{
    return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (uint8_t *)(selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
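
/* the {ESP, SS} pairs for CPL 0-2 start at offset 4 of a 32-bit TSS
   (8 bytes per privilege level) and at offset 2 of a 16-bit TSS
   (4 bytes per level), hence index = (dpl * 4 + 2) << shift above. */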

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* a code segment must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    uint8_t *tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if it is a task gate, we read the TSS segment and load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
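    /* 32-bit TSS layout: CR3 at 0x1c, EIP at 0x20, EFLAGS at 0x24, the
       8 general registers at 0x28, the 6 segment selectors at 0x48, the
       LDT selector at 0x60 and the T flag/I/O map base at 0x64. */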
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        for(i = 0; i < 8; i++)
            stl_kernel(env->tr.base + (0x28 + i * 4), env->regs[i]);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        for(i = 0; i < 8; i++)
            stw_kernel(env->tr.base + (0x12 + i * 2), env->regs[i]);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the context of the
       next task */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without raising an exception, then reload
       them with possible exceptions */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    for(i = 0; i < 8; i++)
        env->regs[i] = new_regs[i];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], NULL, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = NULL;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
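
/* The I/O permission bitmap lives at the end of a 32-bit TSS: the word
   at offset 0x66 gives its offset from the TSS base and each bit covers
   one port. An access of `size' bytes starting at `addr' is allowed
   only if all the corresponding bits are zero. */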
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
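
/* entry points used by the translated code: IN/OUT with an immediate
   port number check T0, the DX forms check the low 16 bits of EDX; the
   suffix gives the access size (b = 1, w = 2, l = 4 bytes). */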

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}
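
/* The B bit of the SS descriptor selects a 32-bit (ESP, mask
   0xffffffff) or 16-bit (SP, mask 0xffff) stack. The macros below keep
   the stack pointer in a local variable and apply the mask on every
   access, so a 16-bit stack wraps inside its segment; callers write the
   value back to ESP only once the whole push sequence has succeeded. */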

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int type, dpl, selector, ss_dpl, cpl, sp_mask;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do this check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int mask;
            /* push the error code */
            shift = (env->segs[R_CS].flags >> DESC_B_SHIFT) & 1;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            env->regs[R_ESP] = (esp & mask) | (env->regs[R_ESP] & ~mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = NULL; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;
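
    /* stack frame layout, from higher to lower addresses: GS, FS, DS
       and ES (vm86 only), then SS and ESP (stack switch only), then
       EFLAGS, CS, EIP and optionally the error code; each slot is 2 or
       4 bytes wide depending on the gate size (shift). */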

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, NULL, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, NULL, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, NULL, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, NULL, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    ESP = (ESP & ~sp_mask) | (esp & sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* an interrupt gate clears the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (uint8_t *)(selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
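
/* in real mode the IDT is the classic interrupt vector table: 4-byte
   entries holding offset:segment, so vector n lives at linear address
   idt.base + n * 4. */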

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from the
 * int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  unsigned int next_eip, int is_hw)
{
#ifdef DEBUG_PCALL
    if (loglevel & (CPU_LOG_PCALL | CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:%08x pc=%08x SP=%04x:%08x",
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=%08x", env->cr[2]);
            } else {
                fprintf(logfile, " EAX=%08x", env->regs[R_EAX]);
            }
            fprintf(logfile, "\n");
#if 0
            cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
#endif
    if (env->cr[0] & CR0_PE_MASK) {
        do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     unsigned int next_eip)
{
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = next_eip;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
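
/* note: the parentheses around raise_exception_err in its definition
   keep the debug macro of the same name (see the #if 0 block at the
   top of this file) from expanding there. */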

#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif
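
/* the 32-bit DIV/IDIV helpers divide the 64-bit value EDX:EAX and
   leave the quotient in EAX and the remainder in EDX. Only division by
   zero is checked here; real hardware also raises #DE when the
   quotient overflows 32 bits. */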

void helper_divl_EAX_T0(uint32_t eip)
{
    unsigned int den, q, r;
    uint64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}

void helper_idivl_EAX_T0(uint32_t eip)
{
    int den, q, r;
    int64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}
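
/* CMPXCHG8B m64: if EDX:EAX equals the memory operand, store ECX:EBX
   there and set ZF; otherwise load the operand into EDX:EAX and clear
   ZF. Only ZF changes, so the other flags are first materialized
   through cc_table and carried over via CC_SRC. */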

void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq((uint8_t *)A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
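
/* feature bits returned in EDX by CPUID leaf 1 */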
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
/* ... */
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)

void helper_cpuid(void)
{
    switch(EAX) {
    case 0:
        EAX = 2; /* max EAX index supported */
        EBX = 0x756e6547;
        ECX = 0x6c65746e;
        EDX = 0x49656e69;
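        /* EBX, EDX and ECX spell out the CPUID vendor string
           "GenuineIntel" */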
        break;
    case 1:
        {
            int family, model, stepping;
            /* EAX = 1 info */
#if 0
            /* pentium 75-200 */
            family = 5;
            model = 2;
            stepping = 11;
#else
            /* pentium pro */
            family = 6;
            model = 1;
            stepping = 3;
#endif
            EAX = (family << 8) | (model << 4) | stepping;
            EBX = 0;
            ECX = 0;
            EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
                CPUID_TSC | CPUID_MSR | CPUID_MCE |
                CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
        }
        break;
    default:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 0x410601;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}

void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = NULL;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }
    env->ldt.selector = selector;
}

void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = NULL;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->tr, e1, e2);
        /* mark the descriptor busy in the GDT */
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works in protected mode and outside VM86 mode. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS)
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_SS) {
            /* must be a writable data segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be a readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip)
{
    int new_cs, new_eip, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be a code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    uint8_t *ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    ESP = (ESP & ~esp_mask) | (esp & esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    uint8_t *ssp, *old_ssp;

    new_cs = T0;
    new_eip = T1;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, new_eip, shift);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

        sp = ESP;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* from this point, not restartable */
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
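
        /* a call gate to a more privileged level switches to the stack
           of the target privilege level (taken from the TSS) and copies
           param_count entries of arguments from the old stack */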

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=%x\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        EIP = offset;
    }
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    uint8_t *ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}
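
/* on a return to a less privileged level, data and non-conforming code
   segments that are more privileged than the new CPL must be cleared */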

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non-conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, NULL, 0, 0);
        }
    }
}
1564

    
1565
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss, sp_mask;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    uint8_t *ssp;

    sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:%08x s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:%08x\n",
                    new_ss, new_esp);
        }
#endif

        if ((new_ss & 3) != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_load_seg_cache(env, R_SS, new_ss,
                       get_seg_base(ss_e1, ss_e2),
                       get_seg_limit(ss_e1, ss_e2),
                       ss_e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
        sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, cpl);
        validate_seg(R_DS, cpl);
        validate_seg(R_FS, cpl);
        validate_seg(R_GS, cpl);

        sp += addend;
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

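/* iret to vm86 mode: the full 32 bit frame EIP, CS, EFLAGS, ESP, SS,
   ES, DS, FS, GS is popped (EIP, CS and EFLAGS were read above) and
   the CPU always ends up at CPL 3 */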
 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

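/* if NT is set, iret terminates a nested task: the selector of the
   previous task is read from the back link field at offset 0 of the
   current TSS and a task switch is done instead of a normal return */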
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}

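/* sysenter enters CPL 0 through flat segments derived from the
   SYSENTER_CS MSR: CS = SYSENTER_CS and SS = SYSENTER_CS + 8; EIP and
   ESP come from the SYSENTER_EIP/SYSENTER_ESP MSRs and IF is cleared */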
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           NULL, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           NULL, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

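/* sysexit returns to CPL 3: CS = SYSENTER_CS + 16 and
   SS = SYSENTER_CS + 24, both with RPL 3; the user EIP is taken from
   EDX and the user ESP from ECX */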
void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           NULL, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           NULL, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
}

void helper_movl_crN_T0(int reg)
{
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, T0);
        break;
    case 3:
        cpu_x86_update_cr3(env, T0);
        break;
    case 4:
        cpu_x86_update_cr4(env, T0);
        break;
    default:
        env->cr[reg] = T0;
        break;
    }
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(unsigned int addr)
{
    cpu_x86_flush_tlb(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    val = cpu_get_tsc(env);
    EAX = val;
    EDX = val >> 32;
}

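/* wrmsr/rdmsr: ECX selects the MSR, the value is in EDX:EAX. Only the
   sysenter MSRs are implemented here; they are 32 bit wide, so EDX is
   ignored on write and returned as 0 on read */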
void helper_wrmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = EAX & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = EAX;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = EAX;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        EAX = env->sysenter_cs;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_ESP:
        EAX = env->sysenter_esp;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_EIP:
        EAX = env->sysenter_eip;
        EDX = 0;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

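/* lsl/lar/verr/verw never fault on a bad selector: they only report
   success through ZF. CC_SRC is first loaded with the current flags
   minus ZF, and CC_Z is or-ed back in when the check passes */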
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC |= CC_Z;
}

void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC |= CC_Z;
}

void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            return;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
    }
    CC_SRC |= CC_Z;
}

void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        return;
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
        if (!(e2 & DESC_W_MASK))
            return;
    }
    CC_SRC |= CC_Z;
}

/* FPU helpers */

void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, (uint8_t *)A0);
}

void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

/* BCD ops */

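/* fbld/fbst use the 80 bit packed BCD format: bytes 0-8 hold 18
   decimal digits, two per byte with the low order digit in the low
   nibble, and bit 7 of byte 9 is the sign (e.g. +1234 is stored as
   34 12 00 ... with a 0x00 sign byte) */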
void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub((uint8_t *)A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub((uint8_t *)A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    CPU86_LDouble tmp;
    int v;
    uint8_t *mem_ref, *mem_end;
    int64_t val;

    tmp = rint(ST0);
    val = (int64_t)tmp;
    mem_ref = (uint8_t *)A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);  /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

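/* fprem/fprem1 compute a partial remainder: if the exponent
   difference is too large, the reduction is left incomplete and C2 is
   set so that the caller reruns the instruction; otherwise the three
   low quotient bits are returned in C0 (q2), C1 (q1) and C3 (q0) */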
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
            floor(fpsrcop): ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if ( expdif < 53 ) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0)?
            -(floor(fabs(fpsrcop))): floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    CPU86_LDouble a;

    a = ST0;
#ifdef __arm__
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_DOWN:
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_UP:
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_CHOP:
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));
        break;
    }
#else
    a = rint(a);
#endif
    ST0 = a;
}

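/* XXX: fscale should scale by 2**trunc(ST1); pow(2.0, ST1) is only
   equivalent when ST1 holds an integral value */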
void helper_fscale(void)
{
    CPU86_LDouble fpsrcop, fptemp;

    fpsrcop = 2.0;
    fptemp = pow(fpsrcop,ST1);
    ST0 *= fptemp;
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

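/* fxam encodes the class of ST0 in C3,C2,C0: 001 NaN, 010 normal,
   011 infinity, 100 zero, 110 denormal; C1 is a copy of the sign bit */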
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

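/* the tag word stores 2 bits per register: 00 valid, 01 zero,
   10 special (NaN, infinity or denormal), 11 empty. fstenv rebuilds
   it from the fptags[] array and the register contents */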
void helper_fstenv(uint8_t *ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i];
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(uint8_t *ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

/* XXX: merge with helper_fstt ? */

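/* conversion between the guest 80 bit extended format (64 bit
   mantissa with explicit integer bit, 15 bit exponent biased by
   16383) and the host CPU86_LDouble. Without USE_X86LDOUBLE the host
   type is a 64 bit double, so precision is lost */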
#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

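/* instantiate the softmmu load/store helpers for each access size:
   SHIFT n generates the (1 << n) byte variants, i.e. 1, 2, 4 and 8
   bytes */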
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    }
    env = saved_env;
}