target-i386 / helper.c @ 1e8a7cfd
/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

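/* x86 parity flag lookup table: CC_P is set for every byte value with
   an even number of set bits (PF is computed on the low 8 bits only) */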
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

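/* x87 constants: zero, one, pi, log10(2), ln(2), log2(e) and log2(10)
   (the values loaded by the x87 FLD-constant instructions) */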
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

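/* fetch SS:ESP for privilege level 'dpl' from the current TSS,
   handling both the 16-bit and the 32-bit TSS layout */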
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* a code segment must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

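/* perform a task switch to the TSS designated by 'tss_selector';
   'source' (JMP, IRET or CALL) selects how the busy bit of the old
   and new tasks and the NT flag are updated */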
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses beforehand */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* from now on, if an exception occurs, it will occur in the next
       task's context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* first load the registers that cannot fault, then reload the
       ones that may trigger exceptions */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first load just the selectors, as the rest may trigger
           exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

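/* wrappers called from the translated code: the port number is taken
   from T0 or DX and the access size is 1, 2 or 4 bytes */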
void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

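/* return the mask applied to the stack pointer: 32 bits if the B bit
   of the SS descriptor is set, 16 bits otherwise */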
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}

/* protected mode interrupt */
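/* The frame pushed on the target stack is, from higher to lower
   addresses: EFLAGS, CS, EIP and the optional error code, preceded by
   the old SS and ESP when a stack switch occurs, and additionally by
   the GS, FS, DS and ES selectors when leaving vm86 mode. */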
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl, sp_mask;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int mask;
            /* push the error code */
            shift = (env->segs[R_CS].flags >> DESC_B_SHIFT) & 1;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            ESP = (esp & mask) | (ESP & ~mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    ESP = (ESP & ~sp_mask) | (esp & sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

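/* fetch a stack pointer slot (RSP0-2 or IST1-7) from the 64-bit TSS */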
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
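/* the frame (SS, RSP, RFLAGS, CS, RIP and the optional error code) is
   always pushed; a non-zero IST index in the gate descriptor forces a
   stack switch even without a privilege change */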
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        esp = ESP & ~0xf; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

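/* SYSCALL: in long mode the return RIP is saved in RCX and RFLAGS in
   R11, with the target taken from MSR_LSTAR (or MSR_CSTAR for 32-bit
   callers); in legacy mode the target EIP is the low 32 bits of
   MSR_STAR. The new CS/SS selectors come from STAR[47:32]. */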
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (env->hflags & HF_CS64_MASK)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}

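/* SYSRET: return to CPL 3 with CS/SS derived from STAR[63:48]; in
   long mode 'dflag == 2' selects a 64-bit return (RIP from RCX),
   otherwise a 32-bit one */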
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

1117
 * Begin execution of an interruption. is_int is TRUE if coming from
1118
 * the int instruction. next_eip is the EIP value AFTER the interrupt
1119
 * instruction. It is only relevant if is_int is TRUE.  
1120
 */
1121
void do_interrupt(int intno, int is_int, int error_code, 
1122
                  target_ulong next_eip, int is_hw)
1123
{
1124
#ifdef DEBUG_PCALL
1125
    if (loglevel & (CPU_LOG_PCALL | CPU_LOG_INT)) {
1126
        if ((env->cr[0] & CR0_PE_MASK)) {
1127
            static int count;
1128
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1129
                    count, intno, error_code, is_int,
1130
                    env->hflags & HF_CPL_MASK,
1131
                    env->segs[R_CS].selector, EIP,
1132
                    (int)env->segs[R_CS].base + EIP,
1133
                    env->segs[R_SS].selector, ESP);
1134
            if (intno == 0x0e) {
1135
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1136
            } else {
1137
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1138
            }
1139
            fprintf(logfile, "\n");
1140
#if 0
1141
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1142
            {
1143
                int i;
1144
                uint8_t *ptr;
1145
                fprintf(logfile, "       code=");
1146
                ptr = env->segs[R_CS].base + env->eip;
1147
                for(i = 0; i < 16; i++) {
1148
                    fprintf(logfile, " %02x", ldub(ptr + i));
1149
                }
1150
                fprintf(logfile, "\n");
1151
            }
1152
#endif
1153
            count++;
1154
        }
1155
    }
1156
#endif
1157
    if (env->cr[0] & CR0_PE_MASK) {
1158
#if TARGET_X86_64
1159
        if (env->hflags & HF_LMA_MASK) {
1160
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1161
        } else
1162
#endif
1163
        {
1164
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1165
        }
1166
    } else {
1167
        do_interrupt_real(intno, is_int, error_code, next_eip);
1168
    }
1169
}
1170

    
1171
/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

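/* the parentheses around the name prevent it from being expanded by
   the raise_exception_err debug macro defined (disabled) at the top
   of this file */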
void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

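/* unsigned 64/32 divide of EDX:EAX by T0: quotient in EAX, remainder
   in EDX; raises #DE on division by zero or quotient overflow */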
void helper_divl_EAX_T0(void)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX_T0(void)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

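/* CMPXCHG8B: compare EDX:EAX with the 64-bit value at A0; if equal,
   store ECX:EBX there and set ZF, otherwise load the value into
   EDX:EAX and clear ZF */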
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

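/* CPUID: out-of-range leaves in EAX are redirected to the highest
   supported basic leaf before dispatching */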
void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 0x410601;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = 0;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        EAX = 0x00003028;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}

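/* helper for ENTER with a non-zero nesting level: copy 'level - 1'
   frame pointers from the old frame onto the stack, then push T1
   (which the caller has loaded with the new frame pointer value) */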
void helper_enter_level(int level, int data32)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), T1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, T1);
    }
}
#endif

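/* LLDT: load the LDT register from the selector in T0; in long mode
   the descriptor is 16 bytes wide and supplies a 64-bit base */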
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

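/* LTR: load the task register from the selector in T0 and mark the
   descriptor busy */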
void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip_addend)
{
    int new_cs, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong new_eip, next_eip;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    ESP = (ESP & ~esp_mask) | (esp & esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

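/* NOTE: overview of the protected mode far call below: when new_cs
   names a code segment, the call just pushes CS:EIP on the current
   stack; when it names a call gate, the gate supplies the real target
   CS:EIP and a parameter count, and a call to a more privileged
   segment switches to the SS:ESP fetched from the TSS, copying
   param_count stack words from the old stack to the new one. */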
/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
{
    int new_cs, new_eip, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    new_cs = T0;
    new_eip = T1;
    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            ESP = (ESP & ~sp_mask) | (sp & sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXX: use SS segment size? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}

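/* NOTE: on a return to an outer privilege level, data segment
   registers still holding a descriptor more privileged than the new
   CPL (non-conforming, DPL < CPL) must be silently loaded with the
   null selector; validate_seg() below implements that check for one
   segment register. */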
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non-conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

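/* NOTE: helper_ret_protected() implements both far RET and IRET. The
   stack holds, from the top: EIP, CS, then EFLAGS for IRET only, and,
   when returning to an outer privilege level, ESP and SS as well;
   'addend' accounts for the immediate operand of RET imm16. */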
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, cpl);
        validate_seg(R_DS, cpl);
        validate_seg(R_FS, cpl);
        validate_seg(R_GS, cpl);

        sp += addend;
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

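/* NOTE: SYSENTER/SYSEXIT derive all four selectors from
   MSR_IA32_SYSENTER_CS: CS = cs and SS = cs + 8 on entry,
   CS = cs + 16 and SS = cs + 24 (both RPL 3) on exit; the segment
   caches are loaded with fixed flat 4 GB descriptors rather than
   being read from the GDT. */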
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_movl_crN_T0(int reg)
{
#if !defined(CONFIG_USER_ONLY)
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, T0);
        break;
    case 3:
        cpu_x86_update_cr3(env, T0);
        break;
    case 4:
        cpu_x86_update_cr4(env, T0);
        break;
    case 8:
        cpu_set_apic_tpr(env, T0);
        break;
    default:
        env->cr[reg] = T0;
        break;
    }
#endif
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(target_ulong addr)
{
    cpu_x86_flush_tlb(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
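/* NOTE: WRMSR takes the 64 bit value in EDX:EAX and the MSR index in
   ECX. For MSR_EFER only the bits backed by a reported CPUID feature
   (SCE, LME, FFXSR, NXE) are writable; all other bits are
   preserved. */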
void helper_wrmsr(void)
{
    uint64_t val;

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            env->efer = (env->efer & ~update_mask) |
                (val & update_mask);
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;
    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    default:
        /* XXX: exception? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

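/* NOTE: LSL, LAR, VERR and VERW never fault on a bad selector; they
   report success or failure through ZF only, which is kept in CC_SRC
   here. The descriptor checks below mirror the hardware access
   rules. */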
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC = eflags | CC_Z;
}

void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC = eflags | CC_Z;
}

void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* FPU helpers */

void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, A0);
}

void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

/* BCD ops */

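/* NOTE: the packed BCD format used by FBLD/FBST stores 18 decimal
   digits, two per byte, least significant byte first; bit 7 of the
   tenth byte holds the sign. For example, -123 is encoded as
   23 01 00 00 00 00 00 00 00 80. */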
void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);       /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop - fptemp * dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q & 0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop - floor(fpsrcop) < ceil(fpsrcop) - fpsrcop) ?
            floor(fpsrcop) : ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop - fptemp * dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q & 0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
            -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp(ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x500 /* Infinity */;
        else
            env->fpus |= 0x100 /* NaN */;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /* Zero */;
        else
            env->fpus |= 0x4400 /* Denormal */;
    } else {
        env->fpus |= 0x400;
    }
}

void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

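/* NOTE: layout of the FXSAVE/FXRSTOR area as used below: 0x00 FCW,
   0x02 FSW, 0x04 abridged tag word (one valid bit per register),
   0x18 MXCSR, 0x1c MXCSR_MASK, 0x20 ST0-ST7 in 16 byte slots,
   0xa0 XMM0-XMM7 (XMM0-XMM15 in 64 bit mode). */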
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

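/* The 128 bit helpers below split each 64 bit operand into 32 bit
   halves, a = a1*2^32 + a0 and b = b1*2^32 + b0, so that
     a * b = a1*b1*2^64 + (a0*b1 + a1*b0)*2^32 + a0*b0
   with every partial product fitting in 64 bits; add128() propagates
   the carry between the two halves of the result. */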
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

static void mul64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    uint32_t a0, a1, b0, b1;
    uint64_t v;

    a0 = a;
    a1 = a >> 32;

    b0 = b;
    b1 = b >> 32;

    v = (uint64_t)a0 * (uint64_t)b0;
    *plow = v;
    *phigh = 0;

    v = (uint64_t)a0 * (uint64_t)b1;
    add128(plow, phigh, v << 32, v >> 32);

    v = (uint64_t)a1 * (uint64_t)b0;
    add128(plow, phigh, v << 32, v >> 32);

    v = (uint64_t)a1 * (uint64_t)b1;
    *phigh += v;
#ifdef DEBUG_MULDIV
    printf("mul: 0x%016llx * 0x%016llx = 0x%016llx%016llx\n",
           a, b, *phigh, *plow);
#endif
}

static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
{
    int sa, sb;
    sa = (a < 0);
    if (sa)
        a = -a;
    sb = (b < 0);
    if (sb)
        b = -b;
    mul64(plow, phigh, a, b);
    if (sa ^ sb) {
        neg128(plow, phigh);
    }
}

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            a1 = (a1 << 1) | (a0 >> 63);
            if (a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016llx%016llx / 0x%016llx: q=0x%016llx r=0x%016llx\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(void)
{
    uint64_t r0, r1;

    mul64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(void)
{
    uint64_t r0, r1;

    imul64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_imulq_T0_T1(void)
{
    uint64_t r0, r1;

    imul64(&r0, &r1, T0, T1);
    T0 = r0;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_divq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

#endif

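/* NOTE: the SSE RSQRT/RCP helpers below compute the result at full
   precision; real hardware only guarantees roughly 12 bits of
   precision (relative error <= 1.5*2^-12) for these approximation
   instructions. */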
float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

float approx_rcp(float a)
{
    return 1.0 / a;
}

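/* NOTE: FPU control word fields decoded below: RC (bits 11-10)
   selects the rounding mode (00 nearest, 01 down, 10 up, 11 chop)
   and PC (bits 9-8) the rounding precision (00 single, 10 double,
   11 extended); PC value 01 is reserved. */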
void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(EXCP0E_PAGE, env->error_code);
        else
            raise_exception_err_norestore(EXCP0E_PAGE, env->error_code);
    }
    env = saved_env;
}