/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
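
/* Illustrative sketch (not part of the original file): parity_table is
   indexed by the low byte of an ALU result and yields CC_P when that
   byte has an even number of set bits, matching the x86 PF definition.
   A dynamic equivalent, for comparison: */
#if 0
static int pf_of(uint8_t b)
{
    int n, ones = 0;
    for (n = 0; n < 8; n++)
        ones += (b >> n) & 1;
    return (ones & 1) == 0 ? CC_P : 0; /* same value as parity_table[b] */
}
#endif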

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
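
/* Illustrative note (not part of the original file): RCL/RCR rotate
   through CF, so an 8-bit rotate effectively works on 9 bits and a
   16-bit rotate on 17 bits.  The tables above reduce a masked 5-bit
   rotate count to count % 9 or count % 17 without a division, e.g.
   rclb_table[10] == 1, just like 10 % 9. */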

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
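
/* Illustrative note (not part of the original file): these are the seven
   constants loadable by the x87 constant instructions -- zero, one, pi,
   log10(2), ln(2), log2(e) and log2(10) -- presumably indexed by the
   FLDZ/FLD1/FLDPI/FLDLG2/FLDLN2/FLDL2E/FLDL2T helpers elsewhere. */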

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
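
/* Illustrative sketch (not part of the original file): how load_segment()
   decodes a selector.  Bits 15..3 index a descriptor, bit 2 (TI) picks
   LDT vs. GDT, bits 1..0 are the RPL.  For a hypothetical value: */
#if 0
void selector_fields_example(void)
{
    int selector = 0x1007;          /* hypothetical selector */
    int index = selector & ~7;      /* 0x1000: byte offset into the table */
    int ti = (selector >> 2) & 1;   /* 1: look in the LDT */
    int rpl = selector & 3;         /* 3: requested privilege level */
    (void)index; (void)ti; (void)rpl;
}
#endif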

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
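
/* Illustrative note (not part of the original file): e1/e2 are the low and
   high 32-bit halves of an 8-byte descriptor.  The limit lives in e1[15:0]
   plus e2[19:16], the base in e1[31:16], e2[7:0] and e2[31:24]; with
   DESC_G_MASK set the limit is in 4K pages, hence the <<12 | 0xfff. */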

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
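
/* Illustrative note (not part of the original file): in a 32-bit TSS the
   ring-N entry stack (ESP then SS) lives at offset 4 + 8*N, which is what
   index = (dpl * 4 + 2) << shift computes with shift = 1; for a 16-bit
   TSS (shift = 0) the SP/SS pairs are packed at offset 2 + 4*N. */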

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
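
/* Illustrative note (not part of the original file): in the descriptor
   type field tested above, bit 3 distinguishes a 32-bit TSS (types 9/11,
   at least 104 bytes) from a 16-bit one (types 1/3, at least 44 bytes),
   and bit 1 is the busy flag; hence the (type & 8) size selection and
   the minimum limits 103 and 43. */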

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
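
/* Illustrative sketch (not part of the original file): the I/O permission
   bitmap holds one bit per port, and an access is allowed only if every
   covered bit is clear.  For a hypothetical 2-byte access to port 0x3f9: */
#if 0
void io_bitmap_example(void)
{
    int addr = 0x3f9, size = 2;      /* hypothetical port and access size */
    int byte_in_bitmap = addr >> 3;  /* 0x7f: bitmap byte covering the port */
    int bit_in_byte = addr & 7;      /* 1: first bit tested in that byte */
    int mask = (1 << size) - 1;      /* 0x3: one bit per accessed byte */
    (void)byte_in_bitmap; (void)bit_in_byte; (void)mask;
}
#endif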

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
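
/* Illustrative note (not part of the original file): with a 16-bit stack
   segment only SP may change, so the high bits of ESP are preserved;
   e.g. ESP = 0x12340100 with sp_mask = 0xffff and val = 0xfffe yields
   ESP = 0x1234fffe.  The 64-bit variant also zero-extends the pointer
   when a 32-bit stack is in use. */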

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
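
/* Illustrative sketch (not part of the original file): typical use of the
   macros above.  sp is a local copy of ESP, masked on every access so a
   16-bit stack wraps correctly, and only committed at the end: */
#if 0
{
    target_ulong ssp = env->segs[R_SS].base;
    uint32_t sp_mask = get_sp_mask(env->segs[R_SS].flags);
    uint32_t esp = ESP, val;
    PUSHL(ssp, esp, sp_mask, 0x12345678);
    POPL(ssp, esp, sp_mask, val);    /* val == 0x12345678 */
    SET_ESP(esp, sp_mask);
}
#endif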

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
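
/* Illustrative note (not part of the original file): for a 32-bit
   privilege-level change the frame built above is, from higher to lower
   addresses: SS, ESP, EFLAGS, CS, EIP [, error code], with GS/FS/DS/ES
   pushed above those when leaving vm86 mode.  Interrupt gates (type bit 0
   clear) additionally clear IF; trap gates leave it set. */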

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
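
/* Illustrative note (not part of the original file): in the 64-bit TSS,
   RSP0..RSP2 live at offsets 4/12/20 and IST1..IST7 at 36..84, so
   "8 * level + 4" covers both cases when the IST path passes ist + 3 as
   the level, as do_interrupt64() does below. */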

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
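
/* Illustrative note (not part of the original file): SYSCALL/SYSRET take
   their selectors from the STAR MSR.  Bits 47..32 give the kernel CS
   (SS is that value + 8, as above), bits 63..48 the base for the user
   selectors used by helper_sysret() below, and in legacy mode bits 31..0
   of STAR supply the SYSCALL target EIP. */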

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
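
/* Illustrative note (not part of the original file): in real mode each
   vector is a 4-byte far pointer, offset first, then segment; e.g. with
   dt->base = 0, vector 8 is read from linear addresses 0x20 (IP) and
   0x22 (CS). */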

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
int check_exception(int intno, int *error_code)
{
    char first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    char second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: %x new %x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
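
/* Illustrative note (not part of the original file): "contributory"
   exceptions are #DE (0) and #TS/#NP/#SS/#GP (10..13).  Two contributory
   faults in a row, or a page fault followed by a contributory fault or
   another page fault, escalate to #DF (8); a further fault while
   delivering #DF is the triple fault aborted above. */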

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int)
        intno = check_exception(intno, &error_code);

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}
1543

    
1544
#endif /* !CONFIG_USER_ONLY */
1545

    
1546

    
1547
#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

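/* NOTE: DIV r/m32 divides EDX:EAX by the 32-bit operand; a zero
   divisor, or a quotient that does not fit in 32 bits, raises #DE. */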
void helper_divl_EAX_T0(void)
{
    unsigned int den, r;
    uint64_t num, q;
    
    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX_T0(void)
{
    int den, r;
    int64_t num, q;
    
    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

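/* NOTE: CMPXCHG8B m64 - if EDX:EAX matches the memory operand, store
   ECX:EBX there and set ZF, otherwise load the operand into EDX:EAX
   and clear ZF; only ZF is modified in eflags. */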
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;
    
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) 
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level) 
            index = env->cpuid_level;
    }
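    /* NOTE: an out-of-range index falls back to the highest basic
       leaf, which mirrors the behaviour documented for CPUID. */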

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH line size, in 8-byte units (8 => 64 bytes); Linux reads it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 0x410601;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = 0;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        EAX = 0x00003028;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}

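/* NOTE: ENTER with a non-zero nesting level - copy level-1 frame
   pointers from the enclosing frame, then push the new frame pointer
   (passed in T1); the translator is expected to call this only with
   level >= 1, since the while(--level) loop assumes that. */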
void helper_enter_level(int level, int data32)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), T1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, T1);
    }
}
#endif

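/* NOTE: LLDT - the selector must reference the GDT (TI bit clear) and
   name an LDT descriptor (system type 2); in long mode the descriptor
   is 16 bytes wide, hence the larger entry_limit. */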
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;
    
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif            
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

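/* NOTE: LTR accepts an available 286/386 TSS descriptor (type 1 or 9)
   from the GDT and marks it busy by writing the type's busy bit back
   to the descriptor. */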
void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;
    
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif            
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) || 
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else 
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works in protected mode, not VM86; seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) 
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector, 
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", 
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip_addend)
{
    int new_cs, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong new_eip, next_eip;
    
    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit && 
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != 
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) || 
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
{
    int new_cs, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip, new_eip;
    
    new_cs = T0;
    new_eip = T1;
    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), 
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else 
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }
            
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

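        /* NOTE: a call through a gate to a more privileged
           non-conforming segment switches to the inner stack taken
           from the TSS and copies param_count words/dwords from the
           old stack. */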
        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n", 
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            
            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;
            
            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss, 
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector, 
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) && 
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret and lret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;
    
#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3; 
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
    
    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) || 
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs, 
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss, 
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else 
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss, 
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs, 
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);
    
    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK | 
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;
    
    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

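/* NOTE: SYSENTER loads flat CS/SS derived from SYSENTER_CS (SS is
   CS + 8), switches to CPL 0 and jumps to SYSENTER_EIP/ESP; a zero
   SYSENTER_CS raises #GP(0). */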
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, 
                           0, 0xffffffff, 
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc, 
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3, 
                           0, 0xffffffff, 
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3, 
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_movl_crN_T0(int reg)
{
#if !defined(CONFIG_USER_ONLY) 
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, T0);
        break;
    case 3:
        cpu_x86_update_cr3(env, T0);
        break;
    case 4:
        cpu_x86_update_cr4(env, T0);
        break;
    case 8:
        cpu_set_apic_tpr(env, T0);
        break;
    default:
        env->cr[reg] = T0;
        break;
    }
#endif
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(target_ulong addr)
{
    cpu_x86_flush_tlb(env, addr);
}

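/* NOTE: RDTSC raises #GP(0) when CR4.TSD is set and CPL > 0; the
   64-bit counter is returned in EDX:EAX. */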
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

#if defined(CONFIG_USER_ONLY) 
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
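    /* NOTE: only EFER bits whose features are advertised by CPUID
       (SCE, LME, FFXSR, NXE) are writable; the others are kept. */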
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            env->efer = (env->efer & ~update_mask) | 
            (val & update_mask);
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break; 
    }
}

void helper_rdmsr(void)
{
    uint64_t val;
    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break; 
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

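/* NOTE: LSL/LAR/VERR/VERW do not fault on bad selectors; failure is
   reported by clearing ZF, success by setting it. */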
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC = eflags | CC_Z;
}

void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC = eflags | CC_Z;
}

void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* FPU helpers */

void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, A0);
}

void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0) 
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    } 
#if !defined(CONFIG_USER_ONLY) 
    else {
        cpu_set_ferr(env);
    }
#endif
}

/* BCD ops */

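/* NOTE: the x87 packed BCD format is 9 bytes of 18 decimal digits
   (two per byte, least significant first) plus a sign byte at offset
   9, bit 7 set meaning negative. */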
void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;
    
    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else { 
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

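/* NOTE: FXTRACT splits ST0 into its unbiased exponent (ends up in
   ST1 after the push) and a significand scaled to [1,2) (in ST0). */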
void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

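/* NOTE: FPREM1/FPREM only reduce partially when the exponents differ
   by 53 or more; C2 is then set so software loops until it clears,
   and the low three quotient bits are reported in C0/C3/C1. */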
void helper_fprem1(void)
3130
{
3131
    CPU86_LDouble dblq, fpsrcop, fptemp;
3132
    CPU86_LDoubleU fpsrcop1, fptemp1;
3133
    int expdif;
3134
    signed long long int q;
3135

    
3136
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3137
        ST0 = 0.0 / 0.0; /* NaN */
3138
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3139
        return;
3140
    }
3141

    
3142
    fpsrcop = ST0;
3143
    fptemp = ST1;
3144
    fpsrcop1.d = fpsrcop;
3145
    fptemp1.d = fptemp;
3146
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3147

    
3148
    if (expdif < 0) {
3149
        /* optimisation? taken from the AMD docs */
3150
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3151
        /* ST0 is unchanged */
3152
        return;
3153
    }
3154

    
3155
    if (expdif < 53) {
3156
        dblq = fpsrcop / fptemp;
3157
        /* round dblq towards nearest integer */
3158
        dblq = rint(dblq);
3159
        ST0 = fpsrcop - fptemp * dblq;
3160

    
3161
        /* convert dblq to q by truncating towards zero */
3162
        if (dblq < 0.0)
3163
           q = (signed long long int)(-dblq);
3164
        else
3165
           q = (signed long long int)dblq;
3166

    
3167
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3168
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
3169
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
3170
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3171
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
3172
    } else {
3173
        env->fpus |= 0x400;  /* C2 <-- 1 */
3174
        fptemp = pow(2.0, expdif - 50);
3175
        fpsrcop = (ST0 / ST1) / fptemp;
3176
        /* fpsrcop = integer obtained by chopping */
3177
        fpsrcop = (fpsrcop < 0.0) ?
3178
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3179
        ST0 -= (ST1 * fpsrcop * fptemp);
3180
    }
3181
}
3182

    
3183
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

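/* FYL2XP1: ST1 <- ST1 * log2(ST0 + 1), then pop.  Only computed when
   ST0 + 1 > 0; otherwise the condition codes are cleared and C2 set. */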
void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

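/* FSQRT.  Note: on a negative operand real hardware raises the invalid
   operation exception; here only a status bit (0x400) is set and the
   host sqrt() is performed anyway, yielding the host's NaN. */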
void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

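/* FSINCOS: on exit ST0 holds cos(arg) and ST1 sin(arg).  As with the
   other trigonometric helpers, arguments outside [-MAXTAN, MAXTAN] are
   not reduced: C2 is set and the stack is left untouched. */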
void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

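/* FSCALE: ST0 <- ST0 * 2^trunc(ST1).  The (int) cast truncates ST1
   towards zero, matching the instruction's behaviour. */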
void helper_fscale(void)
{
    ST0 = ldexp(ST0, (int)ST1);
}

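/* FSIN/FCOS: like FPTAN and FSINCOS above, no argument reduction is
   attempted beyond MAXTAN; out-of-range arguments just set C2 and
   leave ST0 unchanged. */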
void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

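/* FXAM: classify ST0 and report the class in the condition codes:
     C1           = sign of ST0
     (C3,C2,C0)   = 001 NaN, 011 infinity, 100 zero,
                    110 denormal, 010 normal finite */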
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500; /* Infinity */
        else
            env->fpus |= 0x100; /* NaN */
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000; /* Zero */
        else
            env->fpus |= 0x4400; /* Denormal */
    } else {
        env->fpus |= 0x400;
    }
}

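/* FSTENV: store the FPU environment (control word, status word, tag
   word and four zeroed pointer fields) as a 28-byte image in 32-bit
   mode or a 14-byte image in 16-bit mode.  The 2-bit tags are
   recomputed from the register contents: 00 valid, 01 zero,
   10 special (NaN, infinity, denormal), 11 empty. */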
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

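/* FLDENV: reload the environment stored by helper_fstenv.  Only the
   empty/non-empty distinction of each 2-bit tag is kept; the exact
   class is recomputed on the next store. */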
void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

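/* FSAVE: store the environment followed by all eight 80-bit registers
   (14 or 28 bytes of environment + 8 * 10 bytes of registers), then
   reinitialize the FPU as FNINIT would. */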
void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

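/* FXSAVE: store the FPU/SSE state in the 512-byte FXSAVE image.  The
   tag word shrinks to one bit per register and is stored inverted
   (hence the ^ 0xff); the x87 registers live at offset 0x20, 16 bytes
   apart, and the XMM registers at 0xa0 when OSFXSR is enabled. */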
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for (i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for (i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

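/* conversion between the guest 80-bit format and the host
   CPU86_LDouble: a bit-level split when the host long double is used
   (USE_X86LDOUBLE), otherwise a lossy repacking of the 64-bit double's
   mantissa and exponent. */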
#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

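#if 0
/* illustrative round trip through the 80-bit format; a sketch only,
   never built (the name test_fp80 is hypothetical and it assumes a
   hosted printf) */
static void test_fp80(void)
{
    uint64_t mant;
    uint16_t e;

    cpu_get_fp80(&mant, &e, (CPU86_LDouble)1.0);
    /* expected in both builds: mant=8000000000000000 e=3fff */
    printf("mant=%016" PRIx64 " e=%04x\n", mant, (unsigned)e);
    printf("back=%f\n", (double)cpu_set_fp80(mant, e));
}
#endif
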
#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~*plow;
    *phigh = ~*phigh;
    add128(plow, phigh, 1, 0);
}

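/* 128/64 -> 64 bit division by binary long division: the dividend is
   shifted left one bit at a time and the divisor subtracted whenever
   it fits, building the quotient in the freed low bits.  On return
   *plow is the quotient and *phigh the remainder. */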
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for (i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

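#if 0
/* sanity-check sketch for div64, never built (the name test_div64 is
   hypothetical and it assumes a hosted printf): divides 2^64 + 1 by 2
   and expects q = 2^63, r = 1 */
static void test_div64(void)
{
    uint64_t lo = 1, hi = 1;

    if (!div64(&lo, &hi, 2))
        printf("q=%016" PRIx64 " r=%016" PRIx64 "\n", lo, hi);
    /* expected: q=8000000000000000 r=0000000000000001 */
}
#endif
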
/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;

    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = -*plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = -*phigh;
    return 0;
}

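/* 64-bit multiply helpers.  CC_DST receives the low half of the
   result; CC_SRC receives the high half (MUL) or a flag telling
   whether the high half is more than the sign extension of the low
   half (IMUL), from which the lazy flag computation derives CF/OF. */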
void helper_mulq_EAX_T0(void)
{
    uint64_t r0, r1;

    mulu64(&r1, &r0, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(void)
{
    uint64_t r0, r1;

    muls64(&r1, &r0, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_imulq_T0_T1(void)
{
    uint64_t r0, r1;

    muls64(&r1, &r0, T0, T1);
    T0 = r0;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

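/* 128/64 divide helpers.  Both a zero divisor and a quotient that does
   not fit in 64 bits raise EXCP00_DIVZ, matching the x86 #DE
   behaviour. */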
void helper_divq_EAX_T0(void)
{
    uint64_t r0, r1;

    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX_T0(void)
{
    uint64_t r0, r1;

    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_bswapq_T0(void)
{
    T0 = bswap64(T0);
}
#endif

void helper_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->hflags |= HF_HALTED_MASK;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}

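/* MWAIT is only treated as HLT on a uniprocessor configuration; with
   more than one CPU we must not sleep, since the wake-up write of
   another CPU is not tracked. */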
void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}

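/* approximations used by RSQRTPS/RSQRTSS and RCPPS/RCPSS.  These are
   computed at full precision here; real hardware only guarantees a
   relative error of about 2^-12. */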
float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

float approx_rcp(float a)
{
    return 1.0 / a;
}

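/* propagate the RC (rounding control) and, when the host supports
   80-bit arithmetic, the PC (precision control) fields of the FPU
   control word into the softfloat status. */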
void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

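/* instantiate the softmmu load/store templates for the four access
   sizes (SHIFT n selects 1 << n bytes); GETPC() lets the slow path
   find the return address inside the translated code. */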
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}