/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
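
/* parity_table[i] is CC_P exactly when i, viewed as the low 8 bits of
   a result, contains an even number of set bits -- the x86 definition
   of PF.  For example, 0x03 (two set bits) yields CC_P while 0x07
   (three set bits) yields 0. */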

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
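
/* RCL rotates through the carry flag, so the effective rotate count
   for an N-bit operand is taken modulo N + 1: the tables above give
   count % 17 for 16-bit and count % 9 for 8-bit operands.  An 8-bit
   RCL by 10, for instance, behaves like an RCL by 1. */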

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
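
/* These values back the FPU constant-load instructions; in order:
   FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E and FLDL2T (the index
   into f15rk is presumably chosen by the translator per opcode). */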

/* broken thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
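
/* e1 is the low and e2 the high word of an 8-byte segment descriptor.
   The two helpers above gather the scattered fields:
   base[15:0] = e1[31:16], base[23:16] = e2[7:0], base[31:24] = e2[31:24];
   limit[15:0] = e1[15:0], limit[19:16] = e2[19:16], scaled up by
   12 bits (page granularity) when the G bit is set. */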

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
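
/* The offsets used throughout switch_tss() follow the 32-bit TSS
   layout: CR3 at 0x1c, EIP at 0x20, EFLAGS at 0x24, the eight general
   registers at 0x28..0x44, the six segment selectors at 0x48..0x5c,
   the LDT selector at 0x60 and the trap/I/O-map word at 0x64.  The
   16-bit (286) TSS packs the equivalent state into 44 bytes, hence
   the limit checks against 103 versus 43. */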

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
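
/* Worked example for the bitmap check: a 4-byte access to port 0x3fe
   looks at bit offset 0x3fe of the I/O permission bitmap.  addr >> 3
   = 0x7f selects the byte, addr & 7 = 6 the bit within it; the access
   covers bits 6..9 and so straddles a byte boundary, which is why a
   16-bit word is always loaded. */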

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}
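
/* The B (big) bit of the stack-segment descriptor selects a 32-bit
   ESP or a 16-bit SP, so every stack update below is masked with the
   value returned here. */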

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
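
/* The three x86_64 cases matter because a 32-bit stack write must
   zero-extend into the full RSP, while a 16-bit one keeps the upper
   bits of the old value; a plain mask-and-merge would get the 32-bit
   case wrong in long mode. */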

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
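
/* These macros operate on a local copy of the stack pointer: ssp is
   the stack-segment base, sp the offset, sp_mask the width mask from
   get_sp_mask().  Callers commit the final value with SET_ESP() only
   after all pushes have succeeded, so a fault mid-sequence leaves the
   guest ESP untouched. */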

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }

    if (svm_should_check
        && (INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int)) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
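
/* For reference, the frame built above for an inner-privilege 32-bit
   interrupt is, from higher to lower addresses: old SS, old ESP,
   EFLAGS, old CS, old EIP and optionally the error code; when coming
   from vm86 mode, GS/FS/DS/ES are pushed first.  Trap gates (odd
   types) differ from interrupt gates (even types) only in leaving IF
   set, which is the (type & 1) test above. */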

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
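
/* In the 64-bit TSS the stack pointers sit at fixed offsets: RSP0,
   RSP1 and RSP2 at 4, 12 and 20, and IST1..IST7 from 36 onwards.
   The 8 * level + 4 computation covers both, with callers passing
   dpl (0..2) for RSPn or ist + 3 (4..10) for ISTn. */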

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
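
/* Layout of the STAR MSR used above: bits 31:0 hold the legacy-mode
   SYSCALL target EIP, bits 47:32 the SYSCALL CS selector (SS is that
   value + 8) and bits 63:48 the SYSRET CS selector -- hence the
   ">> 32" here and the ">> 48" in helper_sysret() below. */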

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
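
/* This mirrors the architectural double-fault rules: the
   "contributory" class is #DE (0) plus vectors 10..13 (#TS, #NP,
   #SS, #GP).  Two contributory faults, or a page fault followed by a
   contributory fault or another page fault, escalate to #DF; a fault
   while #DF is pending is a triple fault, modelled here with
   cpu_abort(). */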

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
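
/* The stores above fill the SMRAM state save map at the top of the
   64 KB SMRAM block (smbase + 0xfe00 .. smbase + 0xffff, i.e.
   sm_state + 0x7e00 upwards); the offsets differ between the AMD64
   save map (revision 0x00020064) and the legacy 32-bit one, hence
   the two branches.  Execution then resumes in SMM at smbase +
   0x8000 with flat segments, matching the CS/EIP set up above. */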

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */


#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

/* division, flags are undefined */
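
/* DIV/IDIV: the double-width dividend lives in AX, DX:AX or EDX:EAX;
   the quotient goes to the low half and the remainder to the high
   half. #DE is raised for a zero divisor or for a quotient that
   overflows the destination. */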
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */
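
/* BCD adjust helpers: they fix up AL (and AH) after BCD arithmetic.
   AAA/AAS/DAA/DAS compute the resulting flags by hand into CC_SRC,
   while AAM/AAD leave the result in CC_DST for the usual logic-op
   flag evaluation. */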
/* XXX: AAM with an immediate byte of 0 should raise #DE */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}
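
/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; if they
   are equal, store ECX:EBX there and set ZF, otherwise load the
   operand into EDX:EAX and clear ZF. */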
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}
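
/* CPUID: out-of-range leaves are redirected to a supported leaf
   before dispatching. */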
void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */
#if defined(TARGET_X86_64)
#  if defined(USE_KQEMU)
        EAX = 0x00003020;        /* 48 bits virtual, 32 bits physical */
#  else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
        EAX = 0x00003028;        /* 48 bits virtual, 40 bits physical */
#  endif
#else
# if defined(USE_KQEMU)
        EAX = 0x00000020;        /* 32 bits physical */
#  else
        EAX = 0x00000024;        /* 36 bits physical */
#  endif
#endif
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x8000000A:
        EAX = 0x00000001;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}
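
/* ENTER with a non-zero nesting level (and its 64-bit variant below):
   copy level - 1 frame pointers from the old frame onto the new
   stack, then push the new frame pointer t1. */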
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
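
/* LLDT and LTR load a system segment from the GDT; in long mode these
   descriptors are 16 bytes wide, hence the larger entry_limit. */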
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
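
/* Segment register loads: SS must be a writable data segment with
   RPL == DPL == CPL; the other registers accept data or readable code
   segments, with DPL checked against CPL and RPL for non-conforming
   types. */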
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
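
/* Far JMP: either directly to a code segment, or through a call gate
   (which only redirects CS:EIP and keeps the current CPL) or a
   task/TSS gate (which performs a task switch). */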
/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
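
/* Far CALL through a call gate to an inner privilege level switches
   to the stack given by the TSS for the target CPL and copies
   param_count parameters from the caller's stack. */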
/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
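
/* In vm86 mode IOPL is not writable by IRET, so it is excluded from
   the flag bits restored below. */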
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags &= ~HF_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
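
/* Common code for protected mode RETF and IRET: pop CS:EIP (plus
   EFLAGS for IRET); a return to an outer privilege level also pops
   SS:ESP and re-validates the data segment registers. */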
/* protected mode ret/iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
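
/* IRET with NT set returns through the back link of the current TSS
   (nested task); otherwise it is handled by helper_ret_protected. */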
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags &= ~HF_NMI_MASK;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
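
/* SYSENTER/SYSEXIT: fast privilege transitions using flat segments
   derived from the SYSENTER_CS MSR (CS and CS + 8 on entry, CS + 16
   and CS + 24 for the ring 3 CS/SS on exit). */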
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
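
/* MOV to CRn: CR0/CR3/CR4 go through the update functions so that the
   TLB and hflags stay consistent; CR8 is mapped to the APIC TPR. */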
void helper_movl_crN_T0(int reg, target_ulong t0)
{
#if !defined(CONFIG_USER_ONLY)
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        cpu_set_apic_tpr(env, t0);
        env->cr[8] = t0;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
#endif
}

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_movl_crN_T0(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

#if !defined(CONFIG_USER_ONLY)
target_ulong helper_movtl_T0_cr8(void)
{
    return cpu_get_apic_tpr(env);
}
#endif

/* XXX: do more */
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    env->dr[reg] = t0;
}

void helper_invlpg(target_ulong addr)
{
    cpu_x86_flush_tlb(env, addr);
}
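
/* RDTSC faults with #GP when CR4.TSD restricts it to ring 0. */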
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }

    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
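
/* WRMSR/RDMSR: ECX selects the MSR and EDX:EAX carries the 64-bit
   value; in user-only emulation both are no-ops. */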
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            env->efer = (env->efer & ~update_mask) |
                        (val & update_mask);
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: a real CPU raises #GP(0) for an unknown MSR */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;
    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    default:
        /* XXX: a real CPU raises #GP(0) for an unknown MSR */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif
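
/* LSL, LAR, VERR and VERW probe a descriptor without faulting: the
   outcome is reported in ZF (set on success, cleared on failure). */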
3155

    
3156
uint32_t helper_lsl(uint32_t selector)
{
    unsigned int limit;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    selector &= 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

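/* LAR: like LSL, but returns the access rights bytes of the descriptor
   (masked to 0x00f0ff00) and accepts a few more system segment types. */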
uint32_t helper_lar(uint32_t selector)
{
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    selector &= 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

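/* VERR: set ZF if the segment described by the selector is readable
   at the current privilege level, otherwise clear it. */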
void helper_verr(uint32_t selector)
{
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    selector &= 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

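/* VERW: set ZF if the segment is a writable data segment at the
   current privilege level; code segments always fail. */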
void helper_verw(uint32_t selector)
{
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    selector &= 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */

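/* Record an x87 exception in the status word; if it is unmasked in
   the control word, also set the summary (SE) and busy (B) bits so
   the next FWAIT reports it. */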
static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

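/* fcom_ccval/fcomi_ccval map the floatx_compare() result (-1 = less,
   0 = equal, 1 = greater, 2 = unordered; indexed after adding 1) to
   the x87 condition codes C3/C2/C0 and to ZF/PF/CF respectively. */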
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = cc_table[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = cc_table[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

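/* Propagate the rounding control and, on FLOATX80 builds, the
   precision control fields of the FPU control word into the softfloat
   status. */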
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
    FORCE_RET();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

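/* FBLD: convert an 18-digit packed BCD number (two digits per byte,
   sign in the high bit of byte 9) to floating point and push it. */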
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);    /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}

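/* FXTRACT: split ST0 into its unbiased exponent (which replaces ST0)
   and its significand rescaled to [1,2) (pushed on top). */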
void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

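/* FPREM: like FPREM1 but truncates the quotient towards zero; for
   large exponent differences only a partial reduction is done per
   call, with C2 left set to signal the incomplete result. */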
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp(ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

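/* FXAM: classify ST0 into C3..C0 (zero, denormal, normal, infinity,
   NaN), with the sign reported in C1. */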
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /* Infinity */;
        else
            env->fpus |= 0x100 /* NaN */;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /* Zero */;
        else
            env->fpus |= 0x4400 /* Denormal */;
    } else {
        env->fpus |= 0x400;
    }
}

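/* FNSTENV: store the FPU environment (control/status/tag words plus
   zeroed instruction and operand pointers) in 16- or 32-bit layout;
   the tag word is recomputed from the register contents. */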
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

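/* FNSAVE: store the environment followed by the eight data registers
   (10 bytes each), then reset the FPU exactly as fninit does. */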
void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

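/* FXSAVE: store FPU and SSE state in the FXSAVE memory image; the tag
   word is stored in its compressed one-bit-per-register form, and the
   XMM registers are only saved when CR4.OSFXSR is set. */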
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

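/* Conversion between the raw 80-bit register image (64-bit mantissa
   plus sign/exponent word) and the host CPU86_LDouble type.  Without
   USE_X86LDOUBLE the value goes through an IEEE double, so the low 11
   mantissa bits are lost. */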
#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

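/* div64 divides the 128-bit value in *phigh:*plow by b, leaving the
   quotient in *plow and the remainder in *phigh, using a simple
   shift-and-subtract loop when the high part is non-zero. */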
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

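/* 128-bit multiply and divide helpers for the 64-bit operand forms:
   the product/dividend lives in EDX:EAX, and CC_DST/CC_SRC are set so
   the lazy condition code evaluation can derive CF/OF later. */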
void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

void helper_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->hflags |= HF_HALTED_MASK;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}

void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#ifdef __s390__
# define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
#else
# define GETPC() (__builtin_return_address(0))
#endif

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}

/* Secure Virtual Machine helpers */

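/* STGI/CLGI set and clear the global interrupt flag (GIF) that gates
   interrupt delivery while in SVM mode. */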
void helper_stgi(void)
{
    env->hflags |= HF_GIF_MASK;
}

void helper_clgi(void)
{
    env->hflags &= ~HF_GIF_MASK;
}

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(void)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(void)
{
}
void helper_vmsave(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(void)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

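/* Convert between the packed 12-bit VMCB segment attribute format and
   the expanded descriptor attribute bits used in CPUX86State. */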
static inline uint32_t
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
{
    return    ((vmcb_attrib & 0x00ff) << 8)          /* Type, S, DPL, P */
            | ((vmcb_attrib & 0x0f00) << 12)         /* AVL, L, DB, G */
            | ((vmcb_base >> 16) & 0xff)             /* Base 23-16 */
            | (vmcb_base & 0xff000000)               /* Base 31-24 */
            | (vmcb_limit & 0xf0000);                /* Limit 19-16 */
}

static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
{
    return    ((cpu_attrib >> 8) & 0xff)             /* Type, S, DPL, P */
            | ((cpu_attrib & 0xf00000) >> 12);       /* AVL, L, DB, G */
}

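/* VMRUN: save the host state into the hsave page, load the guest
   state and intercept bitmaps from the VMCB at EAX, optionally inject
   an event, and enter the guest. */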
void helper_vmrun(void)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    /* We shift all the intercept bits so we can OR them with the TB
       flags later on */
    env->intercept            = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = int_ctl & V_TPR_MASK;
        cpu_set_apic_tpr(env, env->cr[8]);
        if (env->eflags & IF_MASK)
            env->hflags |= HF_HIF_MASK;
    }

#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;
    CC_DST = 0xffffffff;

    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
            break;
    }

    helper_stgi();

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
    if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    cpu_loop_exit();
}

void helper_vmmcall(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmmcall!\n");
}

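/* VMLOAD/VMSAVE transfer the segment registers and MSRs that VMRUN
   does not handle (FS, GS, TR, LDTR and the syscall/sysenter MSRs)
   between the CPU and the VMCB at EAX. */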
void helper_vmload(void)
{
    target_ulong addr;
    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_LOAD_SEG2(addr, segs[R_FS], fs);
    SVM_LOAD_SEG2(addr, segs[R_GS], gs);
    SVM_LOAD_SEG2(addr, tr, tr);
    SVM_LOAD_SEG2(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(void)
{
    target_ulong addr;
    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_SAVE_SEG(addr, segs[R_FS], fs);
    SVM_SAVE_SEG(addr, segs[R_GS], gs);
    SVM_SAVE_SEG(addr, tr, tr);
    SVM_SAVE_SEG(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_skinit(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"skinit!\n");
}

void helper_invlpga(void)
{
    tlb_flush(env, 0);
}

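/* Check whether the current VMCB intercepts the given exit reason;
   if so, leave the guest via helper_vmexit().  MSR accesses consult
   the MSR permission bitmap referenced by the VMCB. */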
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
        if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
        if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
        if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_IOIO:
        break;

    case SVM_EXIT_MSR:
        if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

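/* VMEXIT: write the guest state back into the VMCB, record the exit
   code, and restore the host state saved by VMRUN. */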
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
        int_ctl &= ~V_TPR_MASK;
        int_ctl |= env->cr[8] & V_TPR_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
    }

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags &= ~HF_HIF_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
        cpu_set_apic_tpr(env, env->cr[8]);
    }
    /* we need to set the efer after the crs so the hidden flags get set properly */
#ifdef TARGET_X86_64
    env->efer  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif

    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    SVM_LOAD_SEG(env->vm_hsave, ES, es);
    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
    SVM_LOAD_SEG(env->vm_hsave, DS, ds);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    helper_clgi();
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
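/* In env->fptags a zero byte marks an FPU/MMX register as valid and a
   one marks it empty.  Executing an MMX instruction makes the whole
   register file valid with the stack top at zero, while EMMS tags
   every register empty again. */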
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(uint64_t *d, uint64_t *s)
{
    *d = *s;
}
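/* ops_sse.h is a size-parameterized template: including it with
   SHIFT 0 generates the 64 bit MMX helpers and with SHIFT 1 the
   128 bit SSE helpers. */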
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"
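/* helper_template.h likewise expands once per operand width: SHIFT 0,
   1, 2 and 3 select the 8, 16, 32 and 64 bit variants, the last one
   only on x86-64. */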
#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif