/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"

//#define DEBUG_PCALL

#if defined(__sparc__) && (HOST_SOLARIS < 10)
#include <ieeefp.h>
#define isinf(x) (fpclass(x) == FP_NINF || fpclass(x) == FP_PINF)
#endif

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
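
/* parity_table[b] is CC_P when the index b has an even number of set
   bits: the x86 PF flag is the even parity of the low byte of the
   result, so flag helpers can look up (result & 0xff) directly. */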

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
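
/* For RCL/RCR, the rotation chain is the operand plus CF, so a 16-bit
   rotate repeats every 17 steps and an 8-bit rotate every 9 steps.
   These tables reduce a masked 5-bit count modulo 17 (rclw) or 9 (rclb)
   without a division, e.g. rclb_table[12] == 3. */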

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
    
100
/* thread support */
101

    
102
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
103

    
104
void cpu_lock(void)
105
{
106
    spin_lock(&global_cpu_lock);
107
}
108

    
109
void cpu_unlock(void)
110
{
111
    spin_unlock(&global_cpu_lock);
112
}
113

    
114
void cpu_loop_exit(void)
115
{
116
    /* NOTE: the register at this point must be saved by hand because
117
       longjmp restore them */
118
    regs_to_env();
119
    longjmp(env->jmp_env, 1);
120
}
121

    
122
/* return non zero if error */
123
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
124
                               int selector)
125
{
126
    SegmentCache *dt;
127
    int index;
128
    target_ulong ptr;
129

    
130
    if (selector & 0x4)
131
        dt = &env->ldt;
132
    else
133
        dt = &env->gdt;
134
    index = selector & ~7;
135
    if ((index + 7) > dt->limit)
136
        return -1;
137
    ptr = dt->base + index;
138
    *e1_ptr = ldl_kernel(ptr);
139
    *e2_ptr = ldl_kernel(ptr + 4);
140
    return 0;
141
}
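
/* Segment descriptor words as used below: e1 is bytes 0-3 (limit 15:0,
   base 15:0) and e2 is bytes 4-7 (base 23:16, type/S/DPL/P, limit 19:16,
   AVL/L/D-B/G, base 31:24).  get_seg_base()/get_seg_limit() reassemble
   those fields; with the G bit set the limit is counted in 4K pages,
   hence the "<< 12 | 0xfff" below. */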

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
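
/* Fetch the privilege-level-dpl stack pointer from the current TSS.
   In a 32-bit TSS the (esp, ss) pairs start at offset 4 and are 8
   bytes apart; in a 16-bit TSS they start at offset 2 and are 4 bytes
   apart.  The shift derived from the TSS type selects between the
   two layouts. */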

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is this correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* reject non-readable code segments */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the access rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
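
/* 32-bit TSS layout used below: CR3 at 0x1c, EIP at 0x20, EFLAGS at
   0x24, the eight GPRs at 0x28 in EAX..EDI order, segment selectors
   at 0x48 (4 bytes apart), LDT selector at 0x60 and the T/trap word
   at 0x64.  The 16-bit TSS packs the same state at offsets
   0x0e..0x2a with 2-byte slots, which is why tss_limit_max is 103
   vs. 43. */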

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* from now on, if an exception occurs, it will occur in the next
       task's context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exceptions */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in the 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
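
/* The I/O permission bitmap lives inside the 32-bit TSS: the word at
   offset 0x66 gives the bitmap's offset from the TSS base, and each
   bit covers one port (bit clear = access allowed).  An access spans
   size bits starting at (addr & 7) within the word read below. */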

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
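
/* SET_ESP only updates the bits selected by the stack-size mask: a
   16-bit stack (B bit clear) must leave the high bits of ESP
   untouched, while the x86-64 variant zero-extends 32-bit updates
   the way the hardware does. */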

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
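
/* An IDT gate descriptor holds the target selector in the high half
   of e1 and the offset split across e1 (bits 15:0) and e2 (bits
   31:16).  Gate types 6/7 are 16-bit interrupt/trap gates, 14/15
   their 32-bit versions and 5 a task gate; interrupt gates (even
   types) clear IF on entry, trap gates leave it set. */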

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}
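
/* In 64-bit mode the TSS holds RSP0-RSP2 at offset 4 (8 bytes each)
   followed by IST1-IST7 at offset 0x24; get_rsp_from_tss() below uses
   levels 0-2 for the privilege stacks and 4-10 (ist + 3) for the
   ISTs, matching the "8 * level + 4" indexing. */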

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif
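
/* SYSCALL/SYSRET take their selectors from the STAR MSR: bits 47:32
   give the kernel CS (with SS at +8), bits 63:48 the base for the
   user selectors.  In long mode the target RIP comes from LSTAR
   (64-bit callers) or CSTAR (compatibility mode) and the flags to
   clear from SFMASK (env->fmask); legacy mode jumps to the EIP in
   STAR[31:0]. */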

void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
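
/* In real mode the IDT is the classic interrupt vector table: 4
   bytes per vector, the offset word first, then the CS selector
   word.  Only FLAGS/CS/IP are pushed and there is no privilege
   checking. */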

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from the
 * int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to a double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
int check_exception(int intno, int *error_code)
{
    char first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    char second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: %x new %x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int)
        intno = check_exception(intno, &error_code);

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
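
/* On SMM entry the CPU state is saved in the SMRAM state save area,
   addressed relative to smbase + 0x8000 with fields near the top
   (the 0x7exx/0x7fxx offsets below).  The 64-bit (AMD-style) and
   legacy 32-bit save maps differ, hence the two layouts. */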

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */

#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif
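
/* 32 bit unsigned divide: EDX:EAX / T0, quotient in EAX, remainder in
   EDX. Raises #DE on division by zero or quotient overflow. */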
void helper_divl_EAX_T0(void)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX_T0(void)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
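
/* CMPXCHG8B: compare EDX:EAX with the 64 bit memory operand. If they
   are equal, ECX:EBX is stored and ZF is set, otherwise the operand
   is loaded into EDX:EAX and ZF is cleared. */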
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH line size, in quadwords: Linux reads it */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 0x410601;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = 0;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        EAX = 0x00003028;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}
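
/* ENTER with a non zero nesting level: copy level - 1 frame pointers
   from the enclosing frame, then push the new frame pointer (passed
   in T1). The first stack decrement leaves room for the saved EBP
   slot. */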
void helper_enter_level(int level, int data32)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), T1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, T1);
    }
}
#endif
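
/* LLDT: load the LDT register from a descriptor in the GDT. In long
   mode system descriptors are 16 bytes, hence the larger entry
   limit. */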
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip_addend)
{
    int new_cs, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong new_eip, next_eip;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
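/* Direct calls go through the code segment checks; gate calls may
   switch to an inner stack taken from the TSS and copy param_count
   parameters from the old stack. */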
void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
{
    int new_cs, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip, new_eip;

    new_cs = T0;
    new_eip = T1;
    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}
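
/* On a return to an outer privilege level, data segment registers
   whose DPL is lower than the new CPL must be nullified. */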
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
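
/* SYSENTER: switch to CPL 0 using the flat segments derived from
   MSR_IA32_SYSENTER_CS; #GP(0) if that MSR was never initialized. */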
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_movl_crN_T0(int reg)
{
#if !defined(CONFIG_USER_ONLY)
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, T0);
        break;
    case 3:
        cpu_x86_update_cr3(env, T0);
        break;
    case 4:
        cpu_x86_update_cr4(env, T0);
        break;
    case 8:
        cpu_set_apic_tpr(env, T0);
        break;
    default:
        env->cr[reg] = T0;
        break;
    }
#endif
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(target_ulong addr)
{
    cpu_x86_flush_tlb(env, addr);
}
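
/* RDTSC: #GP(0) if CR4.TSD is set and CPL != 0, otherwise return the
   time stamp counter in EDX:EAX. */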
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            env->efer = (env->efer & ~update_mask) |
                (val & update_mask);
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;
    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif
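
/* LSL/LAR and VERR/VERW only report success through ZF; the result,
   if any, is written only when the access checks pass. */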
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC = eflags | CC_Z;
}

void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC = eflags | CC_Z;
}

void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* FPU helpers */

void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, A0);
}
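
/* Set exception flags in the FPU status word; if any of them is
   unmasked in the control word, also set the error summary and busy
   bits. */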
void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

/* BCD ops */
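/* FBLD reads 9 bytes of packed BCD digit pairs plus a sign byte;
   FBST stores the same 10 byte format, truncating ST0 to an integer
   first. */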

void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);    /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}
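
/* FXTRACT: replace ST0 by its unbiased exponent and push the
   significand rescaled to [1,2) on top of it. */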
void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}
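
/* FPREM1: IEEE partial remainder (quotient rounded to nearest). When
   the exponent difference is too large, only a partial reduction is
   done and C2 is set so that software can iterate. */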
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
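
/* FPREM truncates its quotient towards zero while FPREM1 rounds it to
   nearest (the IEEE remainder); the C library pair fmod()/remainder()
   makes exactly the same distinction.  A host-side comparison sketch
   (not built): */
#if 0
#include <math.h>
#include <stdio.h>

int main(void)
{
    /* 5/3 = 1.67: FPREM keeps q=1 -> 2.0, FPREM1 rounds to q=2 -> -1.0 */
    printf("fmod=%g remainder=%g\n", fmod(5.0, 3.0), remainder(5.0, 3.0));
    return 0;
}
#endif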

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}
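
/* Note: log(fptemp+1.0) cancels badly for |fptemp| near zero, which is the
   very case FYL2XP1 exists for; where the host libm provides C99 log1p(),
   log1p(fptemp)/log(2.0) would preserve that accuracy. */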

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp(ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /* Infinity */;
        else
            env->fpus |= 0x100 /* NaN */;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /* Zero */;
        else
            env->fpus |= 0x4400 /* Denormal */;
    } else {
        env->fpus |= 0x400;
    }
}
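
/* FXAM class encoding produced above, as (C3,C2,C0) with C1 holding the
   sign of ST0:
       NaN      -> C0        (fpus |= 0x100)
       normal   -> C2        (fpus |= 0x400)
       infinity -> C2,C0     (fpus |= 0x500)
       zero     -> C3        (fpus |= 0x4000)
       denormal -> C3,C2     (fpus |= 0x4400)
   The empty class (C3,C0) is never produced, hence the XXX about fptags. */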

void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}
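
/* The tag word assembled above uses the architectural two-bit code per
   register: 00 = valid, 01 = zero, 10 = special (NaN, infinity, denormal,
   or, with USE_X86LDOUBLE, an unnormal whose explicit integer bit is
   clear), 11 = empty.  The fpip/fpcs/fpoo/fpos slots (last instruction
   and operand pointers) are stored as zero for now. */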

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);

    addr = ptr + 0x20;
    for(i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0; i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}
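
/* Without USE_X86LDOUBLE the FPU registers are plain IEEE doubles, so the
   pair above rebases between the two formats: the 80-bit extended layout
   (15-bit exponent, bias 16383, explicit integer bit 63) and the 64-bit
   double layout (11-bit exponent, bias 1023, 52-bit implicit-one mantissa).
   Shifting the 52-bit mantissa left by 11 and or-ing in bit 63 supplies the
   explicit integer bit; the reverse conversion drops those 11 low bits. */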

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

static void mul64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    uint32_t a0, a1, b0, b1;
    uint64_t v;

    a0 = a;
    a1 = a >> 32;

    b0 = b;
    b1 = b >> 32;

    v = (uint64_t)a0 * (uint64_t)b0;
    *plow = v;
    *phigh = 0;

    v = (uint64_t)a0 * (uint64_t)b1;
    add128(plow, phigh, v << 32, v >> 32);

    v = (uint64_t)a1 * (uint64_t)b0;
    add128(plow, phigh, v << 32, v >> 32);

    v = (uint64_t)a1 * (uint64_t)b1;
    *phigh += v;
#ifdef DEBUG_MULDIV
    printf("mul: 0x%016" PRIx64 " * 0x%016" PRIx64 " = 0x%016" PRIx64 "%016" PRIx64 "\n",
           a, b, *phigh, *plow);
#endif
}
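
/* mul64() is schoolbook multiplication on 32-bit halves:
     a * b = a1*b1 * 2**64 + (a1*b0 + a0*b1) * 2**32 + a0*b0
   with add128() propagating the carries of the two cross terms.  A quick
   host-side self-check, assuming a compiler with unsigned __int128
   (not built): */
#if 0
#include <assert.h>
#include <stdint.h>

static void check_mul64(uint64_t a, uint64_t b)
{
    uint64_t lo, hi;
    unsigned __int128 r = (unsigned __int128)a * b;
    mul64(&lo, &hi, a, b);
    assert(lo == (uint64_t)r && hi == (uint64_t)(r >> 64));
}
#endif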

static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
{
    int sa, sb;
    sa = (a < 0);
    if (sa)
        a = -a;
    sb = (b < 0);
    if (sb)
        b = -b;
    mul64(plow, phigh, a, b);
    if (sa ^ sb) {
        neg128(plow, phigh);
    }
}

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}
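
/* The slow path of div64() is plain restoring shift-subtract division:
   each of the 64 iterations shifts the 128-bit remainder left by one,
   subtracts the divisor when it fits, and shifts the resulting quotient
   bit into a0.  After the loop a0 holds the quotient and a1 the remainder.
   The early "a1 >= b" test is the exact overflow condition: the quotient
   would need more than 64 bits, which is what DIV must fault on. */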

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}
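
/* The asymmetric bounds above are the two's complement quotient limits:
   a negative quotient may reach -2**63 (so overflow only when
   *plow > 1ULL << 63), while a non-negative one must stay below 2**63.
   The remainder takes the sign of the dividend, hence the final negation
   when sa is set. */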

void helper_mulq_EAX_T0(void)
{
    uint64_t r0, r1;

    mul64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(void)
{
    uint64_t r0, r1;

    imul64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_imulq_T0_T1(void)
{
    uint64_t r0, r1;

    imul64(&r0, &r1, T0, T1);
    T0 = r0;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_divq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_bswapq_T0(void)
{
    T0 = bswap64(T0);
}
#endif

void helper_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->hflags |= HF_HALTED_MASK;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}

void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}

float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

float approx_rcp(float a)
{
    return 1.0 / a;
}
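
/* Note: despite the "approx" names, these use full-precision host division
   and sqrt, whereas hardware RCPSS/RSQRTSS only guarantee a relative error
   of at most 1.5 * 2**-12; guest code that refines the estimate with
   Newton-Raphson steps still converges, it just starts closer. */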

void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}
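
/* The FLOATX80 switch decodes the precision-control field (fpuc bits 8-9):
   00 selects a 24-bit significand (single), 10 a 53-bit one (double) and
   11 the full 64-bit extended significand; the reserved 01 setting falls
   through to extended.  The 32/64/80 values name the corresponding
   floatx80 storage formats expected by softfloat. */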

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}