/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    printf("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

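/* table giving the x86 PF (parity) flag for each 8-bit value: CC_P is
   set when the byte contains an even number of one bits */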
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

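/* RCL rotates through the carry flag, so the effective rotate count of
   a 16-bit operand is taken modulo 17 and that of an 8-bit operand
   modulo 9; these tables reduce the 5-bit count accordingly */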
/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

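/* the constants loaded by the x87 fldz, fld1, fldpi, fldlg2, fldln2,
   fldl2e and fldl2t instructions */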
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved back by hand here because
       longjmp restores them */
#ifdef reg_EAX
    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_ECX
    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_EBX
    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ESP
    env->regs[R_ESP] = ESP;
#endif
#ifdef reg_EBP
    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESI
    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
    env->regs[R_EDI] = EDI;
#endif
    longjmp(env->jmp_env, 1);
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

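/* the limit field is split across the descriptor words: bits 0-15 of
   e1 and bits 16-19 of e2; with the granularity (G) bit set, the
   limit is in 4 KB units and is scaled to bytes here */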
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
{
    return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (uint8_t *)(selector << 4), 0xffff, 0);
}

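/* fetch the SS:ESP pair for inner privilege level 'dpl' from the
   current TSS: the entries are 4 bytes apart in a 16-bit TSS and
   8 bytes apart in a 32-bit one (shift selects the stride) */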
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* a code segment must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
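/* perform a hardware task switch: the current CPU state is stored into
   the old TSS, then the complete new context (registers, segments,
   CR3, LDT) is loaded from the TSS designated by 'tss_selector'.
   'source' gives the JMP/IRET/CALL semantics for the busy and NT
   bits. */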
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    uint8_t *tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        for(i = 0; i < 8; i++)
            stl_kernel(env->tr.base + (0x28 + i * 4), env->regs[i]);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        for(i = 0; i < 8; i++)
            stw_kernel(env->tr.base + (0x12 + i * 2), env->regs[i]);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    for(i = 0; i < 8; i++)
        env->regs[i] = new_regs[i];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load the selectors first, as loading the rest may trigger
           exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], NULL, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = NULL;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

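/* the I/O permission bitmap starts at the 16-bit offset stored at byte
   0x66 of the TSS; each port is one bit, and two bytes are read
   because an access of 'size' ports can straddle a byte boundary */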
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

/* XXX: add a is_user flag to have proper security support */
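/* stack push/pop primitives: 'sp' is updated locally and masked with
   'sp_mask' on each access, so a 16-bit stack wraps within 64 KB while
   a 32-bit stack uses the full address range */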
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int type, dpl, selector, ss_dpl, cpl, sp_mask;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip;

    has_error_code = 0;
    if (!is_int && !is_hw) {
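        /* exceptions that push an error code: #DF, #TS, #NP, #SS,
           #GP, #PF and #AC */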
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do this check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int mask;
            /* push the error code */
            shift = (env->segs[R_CS].flags >> DESC_B_SHIFT) & 1;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            env->regs[R_ESP] = (esp & mask) | (env->regs[R_ESP] & ~mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = NULL; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            /* XXX: explain why W2K hangs if the whole segment cache is
               reset here */
#if 1
            env->segs[R_ES].selector = 0;
            env->segs[R_ES].flags = 0;
            env->segs[R_DS].selector = 0;
            env->segs[R_DS].flags = 0;
            env->segs[R_FS].selector = 0;
            env->segs[R_FS].flags = 0;
            env->segs[R_GS].selector = 0;
            env->segs[R_GS].flags = 0;
#else
            cpu_x86_load_seg_cache(env, R_ES, 0, NULL, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, NULL, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, NULL, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, NULL, 0, 0);
#endif
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    ESP = (ESP & ~sp_mask) | (esp & sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (uint8_t *)(selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  unsigned int next_eip, int is_hw)
{
#ifdef DEBUG_PCALL
    if (loglevel & (CPU_LOG_PCALL | CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:%08x SP=%04x:%08x",
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=%08x", env->cr[2]);
            } else {
                fprintf(logfile, " EAX=%08x", env->regs[R_EAX]);
            }
            fprintf(logfile, "\n");
#if 0
            cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
#endif
    if (env->cr[0] & CR0_PE_MASK) {
        do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     unsigned int next_eip)
{
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = next_eip;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

void helper_divl_EAX_T0(uint32_t eip)
{
    unsigned int den, q, r;
    uint64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}

void helper_idivl_EAX_T0(uint32_t eip)
{
    int den, q, r;
    int64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}

997
{
998
    uint64_t d;
999
    int eflags;
1000

    
1001
    eflags = cc_table[CC_OP].compute_all();
1002
    d = ldq((uint8_t *)A0);
1003
    if (d == (((uint64_t)EDX << 32) | EAX)) {
1004
        stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
1005
        eflags |= CC_Z;
1006
    } else {
1007
        EDX = d >> 32;
1008
        EAX = d;
1009
        eflags &= ~CC_Z;
1010
    }
1011
    CC_SRC = eflags;
1012
}
1013

    
1014
#define CPUID_FP87 (1 << 0)
1015
#define CPUID_VME  (1 << 1)
1016
#define CPUID_DE   (1 << 2)
1017
#define CPUID_PSE  (1 << 3)
1018
#define CPUID_TSC  (1 << 4)
1019
#define CPUID_MSR  (1 << 5)
1020
#define CPUID_PAE  (1 << 6)
1021
#define CPUID_MCE  (1 << 7)
1022
#define CPUID_CX8  (1 << 8)
1023
#define CPUID_APIC (1 << 9)
1024
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
1025
#define CPUID_MTRR (1 << 12)
1026
#define CPUID_PGE  (1 << 13)
1027
#define CPUID_MCA  (1 << 14)
1028
#define CPUID_CMOV (1 << 15)
1029
/* ... */
1030
#define CPUID_MMX  (1 << 23)
1031
#define CPUID_FXSR (1 << 24)
1032
#define CPUID_SSE  (1 << 25)
1033
#define CPUID_SSE2 (1 << 26)
1034

    
1035
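/* minimal CPUID: leaf 0 returns the "GenuineIntel" vendor string in
   EBX/EDX/ECX, leaf 1 returns family/model/stepping and the feature
   bits defined above */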
void helper_cpuid(void)
{
    switch(EAX) {
    case 0:
        EAX = 2; /* max EAX index supported */
        EBX = 0x756e6547;
        ECX = 0x6c65746e;
        EDX = 0x49656e69;
        break;
    case 1:
        {
            int family, model, stepping;
            /* EAX = 1 info */
#if 0
            /* pentium 75-200 */
            family = 5;
            model = 2;
            stepping = 11;
#else
            /* pentium pro */
            family = 6;
            model = 1;
            stepping = 3;
#endif
            EAX = (family << 8) | (model << 4) | stepping;
            EBX = 0;
            ECX = 0;
            EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
                CPUID_TSC | CPUID_MSR | CPUID_MCE |
                CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
        }
        break;
    default:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 0x410601;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}

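/* LLDT: load the local descriptor table register from the selector in
   T0; a null selector leaves an empty LDT */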
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = NULL;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }
    env->ldt.selector = selector;
}

void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = NULL;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->tr, e1, e2);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS)
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip)
{
    int new_cs, new_eip, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    uint8_t *ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    ESP = (ESP & ~esp_mask) | (esp & esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    uint8_t *ssp, *old_ssp;

    new_cs = T0;
    new_eip = T1;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, new_eip, shift);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

        sp = ESP;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* from this point, not restartable */
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
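        /* bits 0-4 of a call gate descriptor give the number of
           parameter words to copy to the new stack */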
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=%x\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        EIP = offset;
    }
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    uint8_t *ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}

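/* on return to an outer privilege level, data segment registers whose
   DPL is lower than the new CPL must be invalidated */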
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, NULL, 0, 0);
        }
    }
}

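/* common code for lret/iret: 'shift' is the operand size (1: 32 bit,
   0: 16 bit), 'is_iret' also pops EFLAGS, and 'addend' is the extra
   byte count released from the stack for 'lret imm16' */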
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss, sp_mask;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    uint8_t *ssp;

    sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:%08x s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:%08x\n",
                    new_ss, new_esp);
        }
#endif

        if ((new_ss & 3) != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_load_seg_cache(env, R_SS, new_ss,
                       get_seg_base(ss_e1, ss_e2),
                       get_seg_limit(ss_e1, ss_e2),
                       ss_e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
        sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, cpl);
        validate_seg(R_DS, cpl);
        validate_seg(R_FS, cpl);
        validate_seg(R_GS, cpl);

        sp += addend;
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}

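/* NOTE: CR0, CR3 and CR4 writes are routed through cpu_x86_update_crN()
   so that cached translation state (TLB, hflags) can be refreshed;
   other control registers are stored as is */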
void helper_movl_crN_T0(int reg)
{
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, T0);
        break;
    case 3:
        cpu_x86_update_cr3(env, T0);
        break;
    case 4:
        cpu_x86_update_cr4(env, T0);
        break;
    default:
        env->cr[reg] = T0;
        break;
    }
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(unsigned int addr)
{
    cpu_x86_flush_tlb(env, addr);
}

/* rdtsc */
#if !defined(__i386__) && !defined(__x86_64__)
uint64_t emu_time;
#endif

void helper_rdtsc(void)
{
    uint64_t val;
#if defined(__i386__) || defined(__x86_64__)
    asm("rdtsc" : "=A" (val));
#else
    /* better than nothing: the time increases */
    val = emu_time++;
#endif
    EAX = val;
    EDX = val >> 32;
}

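/* NOTE: only the SYSENTER MSRs are handled; other MSR accesses are
   silently ignored for now */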
void helper_wrmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = EAX & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = EAX;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = EAX;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        EAX = env->sysenter_cs;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_ESP:
        EAX = env->sysenter_esp;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_EIP:
        EAX = env->sysenter_eip;
        EDX = 0;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

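/* LSL/LAR/VERR/VERW report success through ZF: CC_Z is first cleared
   in CC_SRC and set again only if every descriptor check passes */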
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC |= CC_Z;
}

void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC |= CC_Z;
}

void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            return;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
    }
    CC_SRC |= CC_Z;
}

void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        return;
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
        if (!(e2 & DESC_W_MASK))
            return;
    }
    CC_SRC |= CC_Z;
}

/* FPU helpers */

void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, (uint8_t *)A0);
}

/* BCD ops */

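/* packed BCD operands are 10 bytes: bytes 0-8 hold 18 decimal digits
   (two per byte, low digit in the low nibble), bit 7 of byte 9 is the
   sign */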
void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub((uint8_t *)A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub((uint8_t *)A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    CPU86_LDouble tmp;
    int v;
    uint8_t *mem_ref, *mem_end;
    int64_t val;

    tmp = rint(ST0);
    val = (int64_t)tmp;
    mem_ref = (uint8_t *)A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

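/* XXX: on real hardware F2XM1 is only defined for -1 <= ST0 <= 1 */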
void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

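/* IEEE partial remainder: differs from helper_fprem() below only in
   the large exponent case, where the quotient estimate is rounded to
   the nearest integer instead of chopped */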
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
            floor(fpsrcop): ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if ( expdif < 53 ) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0)?
            -(floor(fabs(fpsrcop))): floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    CPU86_LDouble a;

    a = ST0;
#ifdef __arm__
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_DOWN:
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_UP:
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_CHOP:
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));
        break;
    }
#else
    a = rint(a);
#endif
    ST0 = a;
}

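/* XXX: real hardware truncates ST1 towards zero before scaling;
   pow() is used on the untruncated value here */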
void helper_fscale(void)
{
    CPU86_LDouble fpsrcop, fptemp;

    fpsrcop = 2.0;
    fptemp = pow(fpsrcop,ST1);
    ST0 *= fptemp;
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

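/* the stored fptag word uses two bits per register: 00 = valid,
   01 = zero, 10 = NaN/infinity/denormal, 11 = empty */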
void helper_fstenv(uint8_t *ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i];
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(uint8_t *ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

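/* FSAVE = FSTENV (14 or 28 bytes depending on the operand size, hence
   'ptr += 14 << data32') followed by the eight 80 bit registers, then
   an implicit fninit */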
void helper_fsave(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

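/* conversion helpers between the host 'double' layout and the 80 bit
   x86 extended format (explicit integer bit, 15 bit exponent biased
   by 16383); the non-trivial versions are only needed when
   USE_X86LDOUBLE is not defined */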
/* XXX: merge with helper_fstt ? */

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

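/* softmmu_template.h is included once per access size: SHIFT is the
   log2 of the size in bytes (0 = byte, 1 = word, 2 = long,
   3 = quad) */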
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    }
    env = saved_env;
}