target-i386/helper.c @ 0211e5af
/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
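
/* PF is set when the low 8 bits of a result contain an even number of
   1 bits, so parity_table[i] is CC_P exactly when popcount(i) is even.
   Equivalently (illustrative sketch only -- the precomputed table above
   is what is compiled -- assuming the ctpop8() popcount helper from
   host-utils.h):

       for (i = 0; i < 256; i++)
           parity_table[i] = (ctpop8(i) & 1) ? 0 : CC_P;
*/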

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
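
/* RCL/RCR rotate through CF, so a w-bit operand effectively rotates
   through w+1 bits and the masked 5-bit count is reduced modulo 17
   for 16-bit operands and modulo 9 for 8-bit ones.  The tables above
   do that reduction; e.g. rclb_table[9] == 0, since rotating an 8-bit
   value through carry 9 times is a no-op. */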

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

/* return non-zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
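
/* Descriptor layout reminder: e1 is the low word of an 8-byte GDT/LDT
   entry (limit[15:0], base[15:0]) and e2 the high word (base[23:16],
   access byte, limit[19:16] plus the G/B/L/AVL flags, base[31:24]).
   The helpers above just reassemble the scattered base and limit
   fields; with DESC_G_MASK set the limit is in 4 KiB pages, hence the
   "(limit << 12) | 0xfff" expansion. */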

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
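
/* 32-bit TSS layout: ESP0 at offset 4 and SS0 at 8, then ESP1/SS1 at
   12/16 and ESP2/SS2 at 20/24 -- i.e. (dpl * 4 + 2) << 1.  A 16-bit
   TSS packs SP0 at offset 2 and SS0 at 4 (shift == 0).  For example,
   dpl == 1 on a 32-bit TSS yields index 12, reading ESP1 there and
   SS1 at index + 4. */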

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is this correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* code segments must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in the 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
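
/* Summary of the hardware task-switch sequence implemented above:
   resolve a task gate to its TSS descriptor if needed, validate the
   new TSS (present, in the GDT, large enough), read the incoming
   register state, save the outgoing state into the old TSS, update
   the busy bits (cleared on JMP/IRET for the old task, set on
   JMP/CALL for the new one), load TR and CR3, then reload registers
   and segment caches -- the segment loads come last because they can
   fault in the context of the new task. */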

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
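
/* Worked example: a word access to port 0x3f9 reads the I/O bitmap
   base from TSS offset 0x66, adds 0x3f9 >> 3 == 0x7f, loads 16 bits
   there and shifts right by 0x3f9 & 7 == 1; with mask == 3 the access
   is allowed only if bitmap bits 1-2 of that byte are both clear.
   Reading two bytes lets a single check straddle a byte boundary,
   e.g. a dword access at port 0x3fe. */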

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
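
/* On x86_64 the three cases matter because ESP is really RSP: a
   16-bit stack replaces only SP, a 32-bit stack zero-extends into RSP
   (writes to 32-bit registers clear the high half), and a 64-bit
   stack takes the full value.  The 32-bit build can use the simple
   mask-and-merge form. */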

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }

    if (svm_should_check
        && (INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int)) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
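
/* 64-bit TSS layout: RSP0/1/2 live at offsets 4, 12 and 20, and the
   IST entries follow at offset 36 (IST1) through 84 (IST7), so
   "8 * level + 4" serves both uses: level 0-2 selects an RSPn and, as
   used below, level ist + 3 selects ISTn (e.g. ist == 1 gives
   offset 36). */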

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
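
/* STAR MSR layout as used above: bits 31:0 hold the legacy-mode
   SYSCALL target EIP, bits 47:32 the kernel CS selector loaded by
   SYSCALL (kernel SS is that value + 8), and bits 63:48 the user CS
   base selector consumed by SYSRET below.  In long mode the target
   RIP comes from LSTAR (64-bit callers) or CSTAR (compatibility
   mode), and SFMASK (env->fmask) selects which RFLAGS bits SYSCALL
   clears. */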

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
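
/* SYSRET derives the user selectors from STAR[63:48]: 32-bit returns
   use that selector for CS, 64-bit returns (dflag == 2) use it + 16,
   and SS is always it + 8 -- which is why the GDT conventionally lays
   out the user segments as 32-bit CS, stack/data, 64-bit CS in
   consecutive slots.  In long mode RFLAGS is restored from R11;
   legacy mode just sets IF. */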

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
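
/* The "contributory" class here is vectors 0 and 10-13 (#DE, #TS,
   #NP, #SS, #GP).  Per the IA-32 double-fault rules, a contributory
   fault raised while delivering a contributory fault, or a
   contributory fault or #PF raised while delivering #PF, escalates
   to #DF (vector 8, error code 0); a further fault while delivering
   #DF is a triple fault, which shuts the CPU down -- modelled above
   with cpu_abort().  E.g. #GP raised while delivering #NP becomes
   #DF. */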

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
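
/* Bit 17 of the SMM revision identifier advertises SMBASE relocation
   support; both values above set it (0x00020000), and helper_rsm()
   below only honours a relocated SMBASE when the saved revision word
   has that bit.  The low bits encode the save-state format revision,
   0x64 being the 64-bit (AMD64) layout. */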

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}
1608

    
1609
#endif /* !CONFIG_USER_ONLY */
1610

    
1611

    
1612
/* division, flags are undefined */
1613

    
1614
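/* Note on the DIV/IDIV helpers below: each divides the double-width
   dividend (AX, DX:AX or EDX:EAX) by the operand and raises #DE
   (EXCP00_DIVZ) both for a zero divisor and for a quotient that
   overflows the destination, matching x86 semantics.  Worked example
   for the 8-bit case: AX=0x0403 (1027), divisor=0x10 gives quotient
   0x40 in AL and remainder 0x03 in AH. */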
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

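/* The ASCII/decimal adjust helpers below operate on BCD digits held in
   AL/AH and recompute the flags by hand.  Example for AAM with the
   default base 10: AL=0x4B (75) becomes AH=7, AL=5.  Example for AAD:
   AH=7, AL=5 collapses back to AL=75 (0x4B) with AH cleared. */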
/* XXX: exception */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

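/* CMPXCHG8B: compare the 64-bit value at [a0] with EDX:EAX.  If equal,
   store ECX:EBX there and set ZF; otherwise load the memory value into
   EDX:EAX and clear ZF.  The other flags are left untouched. */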
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}

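/* CPUID: out-of-range leaves are clamped to the highest supported basic
   leaf (cpuid_level), for both the basic and the 0x8000xxxx extended
   range, before the switch below dispatches on the leaf number. */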
void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */
#if defined(TARGET_X86_64)
#  if defined(USE_KQEMU)
        EAX = 0x00003020;        /* 48 bits virtual, 32 bits physical */
#  else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
        EAX = 0x00003028;        /* 48 bits virtual, 40 bits physical */
#  endif
#else
#  if defined(USE_KQEMU)
        EAX = 0x00000020;        /* 32 bits physical */
#  else
        EAX = 0x00000024;        /* 36 bits physical */
#  endif
#endif
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x8000000A:
        EAX = 0x00000001;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}

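/* ENTER, nesting-level part only: copies the level-1 enclosing frame
   pointers from the previous frame and pushes the frame temporary t1.
   Note (inferred from the helper never writing ESP/EBP back): the push
   of EBP and the final ESP/EBP update are left to the caller. */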
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif

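/* LLDT: the descriptor must come from the GDT (TI bit clear), be of
   system type 2 (LDT) and present.  In long mode LDT descriptors grow
   to 16 bytes, hence entry_limit 15 instead of 7, and bits 63:32 of
   the base are taken from the third descriptor dword. */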
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
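/* A null selector loads a null descriptor cache entry.  Loading SS with
   a null selector faults, except in 64-bit code at CPL != 3 where a
   null SS is architecturally allowed. */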
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
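/* For a call gate to a more privileged segment, the helper switches to
   the inner stack fetched from the TSS, copies param_count parameters
   from the old stack, and pushes the old SS:ESP and CS:EIP there.  The
   gate type encodes the operand size (type >> 3: 0 = 16-bit, 1 = 32-bit). */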
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags &= ~HF_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
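/* Common RET/IRET tail.  'shift' selects the operand size (0 = 16-bit,
   1 = 32-bit, 2 = 64-bit, the last popping through POPQ with an
   all-ones stack mask); 'addend' is the extra byte count that an
   lret imm16 discards from the stack. */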
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags &= ~HF_NMI_MASK;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_movl_crN_T0(int reg, target_ulong t0)
{
#if !defined(CONFIG_USER_ONLY)
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        cpu_set_apic_tpr(env, t0);
        env->cr[8] = t0;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
#endif
}

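/* Example of the LMSW masking below: with CR0.PE already 1, writing 0x6
   yields (cr0 & ~0xe) | 0x6, i.e. MP and EM become 1 while PE stays 1
   even though bit 0 of the source operand is 0. */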
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_movl_crN_T0(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

#if !defined(CONFIG_USER_ONLY)
target_ulong helper_movtl_T0_cr8(void)
{
    return cpu_get_apic_tpr(env);
}
#endif

/* XXX: do more */
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    env->dr[reg] = t0;
}

void helper_invlpg(target_ulong addr)
{
    cpu_x86_flush_tlb(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }

    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}

#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            env->efer = (env->efer & ~update_mask) |
            (val & update_mask);
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;
    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

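/* LSL/LAR/VERR/VERW report their outcome through ZF only: on success
   the helpers below or-in CC_Z and return the limit or access rights,
   and on any check failure they clear CC_Z and return without faulting. */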
uint32_t helper_lsl(uint32_t selector)
3133
{
3134
    unsigned int limit;
3135
    uint32_t e1, e2, eflags;
3136
    int rpl, dpl, cpl, type;
3137

    
3138
    selector &= 0xffff;
3139
    eflags = cc_table[CC_OP].compute_all();
3140
    if (load_segment(&e1, &e2, selector) != 0)
3141
        goto fail;
3142
    rpl = selector & 3;
3143
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3144
    cpl = env->hflags & HF_CPL_MASK;
3145
    if (e2 & DESC_S_MASK) {
3146
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3147
            /* conforming */
3148
        } else {
3149
            if (dpl < cpl || dpl < rpl)
3150
                goto fail;
3151
        }
3152
    } else {
3153
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3154
        switch(type) {
3155
        case 1:
3156
        case 2:
3157
        case 3:
3158
        case 9:
3159
        case 11:
3160
            break;
3161
        default:
3162
            goto fail;
3163
        }
3164
        if (dpl < cpl || dpl < rpl) {
3165
        fail:
3166
            CC_SRC = eflags & ~CC_Z;
3167
            return 0;
3168
        }
3169
    }
3170
    limit = get_seg_limit(e1, e2);
3171
    CC_SRC = eflags | CC_Z;
3172
    return limit;
3173
}
3174

    
3175
uint32_t helper_lar(uint32_t selector)
{
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    selector &= 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

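/* Both helper_lsl() and helper_lar() above apply the same
   visibility rule: for non-conforming segments the descriptor is
   usable only when DPL >= CPL and DPL >= RPL.  A stand-alone sketch
   of the field extraction (illustrative only; DPL sits in bits
   13-14 of the second descriptor word, i.e. DESC_DPL_SHIFT == 13):  */
#if 0
static int seg_dpl_check(uint32_t selector, uint32_t e2, int cpl)
{
    int rpl = selector & 3;          /* requestor privilege level */
    int dpl = (e2 >> 13) & 3;        /* descriptor privilege level */
    return !(dpl < cpl || dpl < rpl);    /* 0 means ZF is cleared */
}
#endif
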
void helper_verr(uint32_t selector)
{
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    selector &= 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(uint32_t selector)
{
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    selector &= 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

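/* fpu_set_exception() marks the exception as pending (FPUS_SE plus
   the busy bit) only when the corresponding mask bit in the control
   word is clear.  Stand-alone sketch of the unmasked test, assuming
   FPUC_EM covers the six low mask bits (0x3f) as defined in cpu.h
   (illustrative only):  */
#if 0
static int fpu_exception_unmasked(unsigned int fpus, unsigned int fpuc)
{
    return (fpus & (~fpuc & 0x3f)) != 0;   /* raised and not masked */
}
#endif
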
void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

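/* All the ST0 loads above push by decrementing the top-of-stack
   pointer modulo 8 and clearing the tag of the new slot.  A
   stand-alone sketch of the circular push (illustrative only):  */
#if 0
static int fpstt_push(int fpstt, int fptags[8])
{
    fpstt = (fpstt - 1) & 7;    /* wrap around the 8 registers */
    fptags[fpstt] = 0;          /* 0 = valid, 1 = empty */
    return fpstt;
}
#endif
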
uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = cc_table[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = cc_table[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

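/* fcom_ccval/fcomi_ccval are indexed by the softfloat comparison
   result (less = -1, equal = 0, greater = 1, unordered = 2) shifted
   up by one.  fcom sets C0 for "less", C3 for "equal" and
   C3|C2|C0 for "unordered"; fcomi maps the same outcomes onto
   CF/ZF/PF.  Stand-alone sketch of the lookup (illustrative only):  */
#if 0
static int fcom_status_bits(int relation)    /* relation in [-1, 2] */
{
    static const int ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
    return ccval[relation + 1];              /* C0/C2/C3 of FPUS */
}
#endif
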
void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

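/* The status word keeps the top-of-stack pointer in bits 11-13
   (mask 0x3800); helper_fnstsw() merges the current fpstt into that
   field, and fldenv/fxrstor below extract it again.  Stand-alone
   sketch (illustrative only):  */
#if 0
static unsigned int fpus_with_top(unsigned int fpus, int fpstt)
{
    return (fpus & ~0x3800) | ((fpstt & 7) << 11);
}

static int top_from_fpus(unsigned int fpus)
{
    return (fpus >> 11) & 7;
}
#endif
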
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
    FORCE_RET();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

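/* The 10-byte packed-BCD format stores 18 decimal digits, two per
   byte with the low digit in the low nibble, least significant byte
   first, and the sign in bit 7 of byte 9.  Stand-alone sketch of
   the per-byte packing done by the fbst loop above (illustrative
   only):  */
#if 0
static unsigned char bcd_pack(int v)    /* v in [0, 99] */
{
    return ((v / 10) << 4) | (v % 10);  /* e.g. 34 -> 0x34 */
}
#endif
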
void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

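/* Both remainder helpers publish the three low quotient bits in
   C0, C3 and C1 of the status word (bits 8, 14 and 9).  Stand-alone
   sketch of that scatter (illustrative only):  */
#if 0
static unsigned int fprem_quotient_bits(long long q)
{
    unsigned int fpus = 0;
    fpus |= (q & 0x4) << (8 - 2);   /* C0 (bit 8)  <-- q2 */
    fpus |= (q & 0x2) << (14 - 1);  /* C3 (bit 14) <-- q1 */
    fpus |= (q & 0x1) << (9 - 0);   /* C1 (bit 9)  <-- q0 */
    return fpus;
}
#endif
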
void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

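/* helper_fstenv() rebuilds the architectural 2-bit-per-register tag
   word (00 valid, 01 zero, 10 special, 11 empty) from the internal
   one-bit fptags[].  Stand-alone sketch of the reverse direction,
   which helper_fldenv() below applies (illustrative only):  */
#if 0
static void fptags_from_tag_word(int fptag, int fptags[8])
{
    int i;
    for(i = 0; i < 8; i++) {
        fptags[i] = ((fptag & 3) == 3);   /* only "empty" is kept */
        fptag >>= 2;
    }
}
#endif
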
void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

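/* fxsave stores the abridged tag word: one bit per register, set
   when the register is valid, hence the "^ 0xff" against the
   internal empty flags.  Stand-alone sketch of both directions
   (illustrative only):  */
#if 0
static unsigned int fxsave_tag(const int fptags[8]) /* 1 = empty */
{
    unsigned int fptag = 0;
    int i;
    for(i = 0; i < 8; i++)
        fptag |= fptags[i] << i;
    return fptag ^ 0xff;                            /* 1 = valid */
}

static void fxrstor_tag(unsigned int fptag, int fptags[8])
{
    int i;
    fptag ^= 0xff;                                  /* 1 = empty */
    for(i = 0; i < 8; i++)
        fptags[i] = (fptag >> i) & 1;
}
#endif
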
#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

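/* div64() above is plain restoring division: the 128-bit dividend
   is shifted left one bit at a time, b is subtracted whenever the
   high half reaches it, and the quotient bits are collected in the
   low half.  A usage sketch (illustrative only):  */
#if 0
static void div64_example(void)
{
    /* 2^64 / 3: quotient 0x5555555555555555, remainder 1 */
    uint64_t lo = 0, hi = 1;
    if (!div64(&lo, &hi, 3)) {
        /* now lo == 0x5555555555555555ULL and hi == 1 */
    }
}
#endif
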
void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

void helper_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->hflags |= HF_HALTED_MASK;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}

void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#ifdef __s390__
# define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
#else
# define GETPC() (__builtin_return_address(0))
#endif

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}

/* Secure Virtual Machine helpers */

void helper_stgi(void)
{
    env->hflags |= HF_GIF_MASK;
}

void helper_clgi(void)
{
    env->hflags &= ~HF_GIF_MASK;
}

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(void)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(void)
{
}
void helper_vmsave(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(void)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

static inline uint32_t
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
{
    return    ((vmcb_attrib & 0x00ff) << 8)          /* Type, S, DPL, P */
            | ((vmcb_attrib & 0x0f00) << 12)         /* AVL, L, DB, G */
            | ((vmcb_base >> 16) & 0xff)             /* Base 23-16 */
            | (vmcb_base & 0xff000000)               /* Base 31-24 */
            | (vmcb_limit & 0xf0000);                /* Limit 19-16 */
}

static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
{
    return    ((cpu_attrib >> 8) & 0xff)             /* Type, S, DPL, P */
            | ((cpu_attrib & 0xf00000) >> 12);       /* AVL, L, DB, G */
}

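/* The VMCB keeps segment attributes in the compressed 12-bit SVM
   layout, while the CPU flags word keeps them at their descriptor
   positions (bits 8-15 and 20-23).  On the attribute bits the two
   helpers above are exact inverses; a round-trip sketch
   (illustrative only):  */
#if 0
static int vmcb_attrib_roundtrip(uint16_t vmcb_attrib)
{
    uint32_t cpu = vmcb2cpu_attrib(vmcb_attrib, 0, 0);
    return cpu2vmcb_attrib(cpu) == (vmcb_attrib & 0x0fff);
}
#endif
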
void helper_vmrun(void)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    /* We shift all the intercept bits so we can OR them with the TB
       flags later on */
    env->intercept            = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = int_ctl & V_TPR_MASK;
        cpu_set_apic_tpr(env, env->cr[8]);
        if (env->eflags & IF_MASK)
            env->hflags |= HF_HIF_MASK;
    }

#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;
    CC_DST = 0xffffffff;

    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
        break;
    }

    helper_stgi();

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
    if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    cpu_loop_exit();
}

void helper_vmmcall(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmmcall!\n");
}

void helper_vmload(void)
{
    target_ulong addr;
    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_LOAD_SEG2(addr, segs[R_FS], fs);
    SVM_LOAD_SEG2(addr, segs[R_GS], gs);
    SVM_LOAD_SEG2(addr, tr, tr);
    SVM_LOAD_SEG2(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(void)
{
    target_ulong addr;
    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_SAVE_SEG(addr, segs[R_FS], fs);
    SVM_SAVE_SEG(addr, segs[R_GS], gs);
    SVM_SAVE_SEG(addr, tr, tr);
    SVM_SAVE_SEG(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_skinit(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"skinit!\n");
}

void helper_invlpga(void)
{
    tlb_flush(env, 0);
}

void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
        if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
        if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
        if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_IOIO:
        break;

    case SVM_EXIT_MSR:
        if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

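/* The I/O permission map holds one bit per port.  Bits 4-6 of
   param carry the access size in bytes, so the check above builds a
   size-wide mask and tests it at byte port/8, bit port%8; the
   16-bit load covers accesses that straddle a byte boundary.
   Stand-alone sketch (illustrative only):  */
#if 0
static int iopm_intercepted(const uint8_t *iopm, uint32_t port,
                            int size)   /* size in [1, 4] bytes */
{
    unsigned int map = iopm[port / 8] | (iopm[port / 8 + 1] << 8);
    unsigned int mask = (1 << size) - 1;
    return (map & (mask << (port & 7))) != 0;
}
#endif
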
/* Note: currently only 32 bits of exit_code are used */
5040
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5041
{
5042
    uint32_t int_ctl;
5043

    
5044
    if (loglevel & CPU_LOG_TB_IN_ASM)
5045
        fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5046
                exit_code, exit_info_1,
5047
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5048
                EIP);
5049

    
5050
    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5051
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5052
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5053
    } else {
5054
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5055
    }

    /* Save the VM state in the vmcb */
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
        int_ctl &= ~V_TPR_MASK;
        int_ctl |= env->cr[8] & V_TPR_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
    }
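
    /* Note: with V_INTR_MASKING the guest's CR8 is virtualized, so its
       current value is copied back into the V_TPR field of int_ctl
       before the host state is restored below. */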

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags &= ~HF_HIF_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
        cpu_set_apic_tpr(env, env->cr[8]);
    }
    /* we need to set the efer after the crs so the hidden flags get set properly */
#ifdef TARGET_X86_64
    env->efer  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
#endif

    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;
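
    /* Note: switching CC_OP to CC_OP_EFLAGS after load_eflags() means
       CC_SRC now holds the host flags verbatim, so the lazy
       condition-code table (cc_table below) can return them without
       recomputation. */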

    SVM_LOAD_SEG(env->vm_hsave, ES, es);
    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
    SVM_LOAD_SEG(env->vm_hsave, DS, ds);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    helper_clgi();
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}
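
/* Note: env->fptags keeps one byte per x87 register (0 = valid,
   1 = empty), so the two 32-bit stores above mark all eight registers
   valid, and resetting fpstt makes MM0..MM7 alias the bottom of the
   x87 register stack. */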

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(uint64_t *d, uint64_t *s)
{
    *d = *s;
}

#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif
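
/* Note: ops_sse.h expands twice, with SHIFT 0 for the 64-bit MMX forms
   and SHIFT 1 for the 128-bit SSE forms; helper_template.h likewise
   expands once per operand size, SHIFT 0..3 selecting the 8/16/32/64-bit
   variants. */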

/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}
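
/* Note: as on real hardware the result is undefined for a zero input;
   here the scan loops would never terminate, so the translator must only
   call these helpers with a non-zero operand. */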

static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
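
/* Note: condition codes are computed lazily: CC_OP records which
   operation last set the flags and CC_SRC/CC_DST hold its operands, so
   the flags are only materialized on demand through this table, e.g.
       eflags = cc_table[CC_OP].compute_all();   (all of O/S/Z/A/P/C)
       cf     = cc_table[CC_OP].compute_c();     (just the carry)
   compute_eflags(), used when saving rflags above, works this way. */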

CCTable cc_table[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = { /* should never happen */ },

    [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },

    [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
    [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
    [CC_OP_MULL] = { compute_all_mull, compute_c_mull },

    [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
    [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
    [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },

    [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
    [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
    [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },

    [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
    [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
    [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },

    [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
    [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
    [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },

    [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
    [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
    [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },

    [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
    [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
    [CC_OP_INCL] = { compute_all_incl, compute_c_incl },

    [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
    [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
    [CC_OP_DECL] = { compute_all_decl, compute_c_incl },

    [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
    [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
    [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },

    [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
    [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
    [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },

#ifdef TARGET_X86_64
    [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },

    [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },

    [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },

    [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },

    [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },

    [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },

    [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },

    [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },

    [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },

    [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
#endif
};