root / target-i386 / helper.c @ bd7a7b33
/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
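
/* Parity lookup: parity_table[b] is CC_P when the byte b contains an
   even number of set bits, matching the x86 PF flag, which is defined
   over the low 8 bits of a result. */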
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
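
/* RCL rotates through CF, so an N-bit RCL is really a rotation over
   N + 1 bits and its count is taken modulo N + 1; these tables reduce
   the count modulo 17 for 16-bit operands and modulo 9 for 8-bit
   operands. */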
/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
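
/* Values used for the x87 constant-load instructions (fldz, fld1,
   fldpi, fldlg2, fldln2, fldl2e, fldl2t); the translator is assumed to
   index this table to implement them. */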
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
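
/* QEMU computes the arithmetic flags lazily: CC_OP records the last
   flag-setting operation and cc_table[CC_OP].compute_all() materializes
   CF/PF/AF/ZF/SF/OF from its saved operands; the result is then merged
   with the non-arithmetic bits kept in env->eflags. */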

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = cc_table[CC_OP].compute_all();
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
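
/* A segment selector is split as: bits 15-3 descriptor index, bit 2
   table indicator (0 = GDT, 1 = LDT), bits 1-0 requested privilege
   level.  The loaders below fetch the two 32-bit descriptor words
   e1 (low) and e2 (high) from the selected table. */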

/* return non-zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
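
/* Fetch the ring-dpl stack pointer from the current TSS: a 32-bit TSS
   keeps an ESP/SS pair for rings 0-2 at offset 4 + 8 * dpl, a 16-bit
   TSS at offset 2 + 4 * dpl. */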

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
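
/* The offsets used below follow the architected TSS layouts: a 32-bit
   TSS keeps CR3 at 0x1c, EIP at 0x20, EFLAGS at 0x24, EAX..EDI at
   0x28-0x44, the segment selectors at 0x48-0x5c, the LDT selector at
   0x60 and the debug-trap flag at 0x64; the 16-bit layout packs the
   same fields into 2-byte slots starting at 0x0e. */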

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first load just the selectors; loading the rest may trigger
           exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
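
/* The I/O permission bitmap lives in the 32-bit TSS at the offset given
   by the 16-bit field at 0x66; an access of the given size is allowed
   only when all corresponding bitmap bits are clear.  Two bytes are
   always read so that accesses spanning a byte boundary work. */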

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}
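
/* The B bit of the SS descriptor selects the stack width: a 32-bit
   stack uses all of ESP, a 16-bit stack only SP.  SET_ESP writes back
   only the bits covered by sp_mask so the untouched part of ESP is
   preserved. */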

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
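
/* An IDT gate descriptor packs its target into two 32-bit words: e1
   holds the code segment selector in bits 31-16 and offset bits 15-0,
   e2 holds offset bits 31-16 plus the P, DPL and type fields. */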

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }

    if (svm_should_check
        && (INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int)) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
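
/* In long mode, IDT entries are 16 bytes, the frame is always pushed as
   64-bit quantities, and the new RSP comes either from an IST slot or
   from the ring stack pointers of the 64-bit TSS. */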
833

    
834
#ifdef TARGET_X86_64
835

    
836
#define PUSHQ(sp, val)\
837
{\
838
    sp -= 8;\
839
    stq_kernel(sp, (val));\
840
}
841

    
842
#define POPQ(sp, val)\
843
{\
844
    val = ldq_kernel(sp);\
845
    sp += 8;\
846
}
847

    
848
static inline target_ulong get_rsp_from_tss(int level)
849
{
850
    int index;
851

    
852
#if 0
853
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
854
           env->tr.base, env->tr.limit);
855
#endif
856

    
857
    if (!(env->tr.flags & DESC_P_MASK))
858
        cpu_abort(env, "invalid tss");
859
    index = 8 * level + 4;
860
    if ((index + 7) > env->tr.limit)
861
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
862
    return ldq_kernel(env->tr.base + index);
863
}

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif
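
/* SYSCALL/SYSRET take their selectors from the STAR MSR: bits 47-32
   hold the SYSCALL CS (SS is CS + 8), bits 63-48 the SYSRET base
   selector.  LSTAR and CSTAR hold the 64-bit and compatibility-mode
   entry points, and FMASK the EFLAGS bits cleared on entry. */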

#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
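
/* In real mode the IDT is a table of 4-byte vectors, each a 16-bit
   offset followed by a 16-bit segment; only FLAGS, CS and IP are pushed
   on the stack. */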

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}
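
/* Exceptions 0 (#DE) and 10-13 (#TS, #NP, #SS, #GP) are "contributory":
   a contributory exception raised while another is being delivered, or
   a contributory or page fault raised while delivering a page fault,
   escalates to a double fault; any fault during double-fault delivery
   is a triple fault. */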

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
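
/* SMM support: on an SMI the CPU saves its state in the save area near
   the top of SMRAM (here sm_state + 0x7e00..0x7fff, i.e. SMBASE +
   0xfe00..0xffff) and restarts execution at SMBASE + 0x8000 with flat
   segments; RSM reloads the saved state. */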

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}
1622

    
1623
#endif /* !CONFIG_USER_ONLY */
1624

    
1625

    
1626
/* division, flags are undefined */
1627

    
1628
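/* DIV/IDIV operand layout: the dividend is twice as wide as the
   divisor and lives in AX (byte form), DX:AX (word form) or EDX:EAX
   (long form); the quotient goes to AL/AX/EAX and the remainder to
   AH/DX/EDX. A zero divisor or a quotient that does not fit in the
   destination raises #DE (EXCP00_DIVZ). */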
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: AAM with an immediate of 0 should raise #DE; this helper does
   not check for it yet */
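/* AAM splits AL by the immediate base (AH = AL / base, AL = AL % base);
   AAD folds AH back into AL (AL = (AH * base + AL) & 0xff) and clears
   AH. Both store the new AL in CC_DST for the lazy SF/ZF/PF
   computation. */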
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

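/* INTO: raise the overflow exception (vector 4) if OF is set */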
void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = cc_table[CC_OP].compute_all();
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

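/* CMPXCHG8B: compare EDX:EAX with the 64-bit value at [a0]; if equal,
   store ECX:EBX there and set ZF, otherwise load the memory value into
   EDX:EAX and clear ZF. */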
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}

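/* CPUID: dispatch on the leaf number in EAX. Leaves above the
   supported maximum are redirected to the highest basic leaf before
   the switch below. */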
void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */
#if defined(TARGET_X86_64)
#  if defined(USE_KQEMU)
        EAX = 0x00003020;        /* 48 bits virtual, 32 bits physical */
#  else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
        EAX = 0x00003028;        /* 48 bits virtual, 40 bits physical */
#  endif
#else
#  if defined(USE_KQEMU)
        EAX = 0x00000020;        /* 32 bits physical */
#  else
        EAX = 0x00000024;        /* 36 bits physical */
#  endif
#endif
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x8000000A:
        EAX = 0x00000001;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}

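/* Nested-frame part of ENTER (level > 0): copies level - 1 frame
   pointers from the previous frame onto the stack and pushes the new
   frame pointer t1; the remaining ESP/EBP register updates are done
   by the translated code. */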
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif

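/* LLDT/LTR: load the LDT register or the task register from a GDT
   selector. In long mode the system descriptors are 16 bytes, so the
   entry limit is 15 instead of 7 and the upper half supplies
   base[63:32]. */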
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

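/* Data/stack segment register load (MOV seg or POP seg): checks
   descriptor type, privilege and presence, sets the accessed bit and
   fills the segment cache. A null selector is rejected for SS unless
   in 64-bit mode at CPL != 3. */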
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
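/* Far JMP in protected mode: the target may be a code segment (loaded
   directly), a task gate / TSS (task switch), or a call gate that
   redirects to another code segment without a privilege change. */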
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
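/* Far CALL in protected mode. A direct call to a code segment pushes
   CS:EIP on the current stack. A call through a call gate to a more
   privileged non-conforming segment switches to the inner stack taken
   from the TSS and copies 'param_count' parameters from the old stack
   before pushing the return address. */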
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags &= ~HF_NMI_MASK;
}

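/* On a return to an outer privilege level, data segment registers
   whose DPL is lower than the new CPL must be nullified. */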
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
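/* Common implementation of RETF and IRET in protected mode: pops
   CS:EIP (and EFLAGS for IRET), and on a privilege level change also
   pops SS:ESP and revalidates the data segment registers. Popping an
   EFLAGS value with VM set returns to vm86 mode. */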
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

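/* IRET in protected mode: if NT is set, return from the nested task
   via the back link stored at offset 0 of the current TSS; otherwise
   perform a normal protected mode return with EFLAGS restore. */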
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags &= ~HF_NMI_MASK;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

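/* SYSENTER/SYSEXIT: fast system calls. CS and SS are derived from the
   SYSENTER_CS MSR (CS = MSR, SS = MSR + 8 on entry; MSR + 16/+24 with
   RPL 3 on exit) and loaded as flat 4 GB segments without descriptor
   table lookups. */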
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_movl_crN_T0(int reg, target_ulong t0)
{
#if !defined(CONFIG_USER_ONLY)
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        cpu_set_apic_tpr(env, t0);
        env->cr[8] = t0;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
#endif
}

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_movl_crN_T0(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

#if !defined(CONFIG_USER_ONLY)
target_ulong helper_movtl_T0_cr8(void)
{
    return cpu_get_apic_tpr(env);
}
#endif

/* XXX: do more */
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    env->dr[reg] = t0;
}

void helper_invlpg(target_ulong addr)
{
    cpu_x86_flush_tlb(env, addr);
}

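/* RDTSC: read the time stamp counter into EDX:EAX; #GP at CPL > 0
   when CR4.TSD is set. */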
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }

    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}

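/* RDMSR/WRMSR: the MSR index is taken from ECX and the value is
   passed in or returned through EDX:EAX. In user-mode emulation both
   are no-ops. */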
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            env->efer = (env->efer & ~update_mask) |
            (val & update_mask);
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;
    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

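/* LSL/LAR/VERR/VERW: inspect a descriptor without loading it. These
   never fault on a bad selector; success or failure is reported
   through ZF (via CC_SRC), with LSL returning the limit and LAR the
   access rights. */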
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */

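/* Set exception bits in the x87 status word; if any of them is unmasked
   in the control word, also raise the summary and busy flags so that a
   later fwait will deliver the exception. */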
static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

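/* Load/store helpers between guest memory and the FPU registers.  A
   union is used to reinterpret the raw bits as float32/float64 before
   converting to the internal extended precision format. */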
void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

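/* Map the floatx_compare() result (-1 less, 0 equal, 1 greater,
   2 unordered; indexed as ret + 1) onto the x87 condition codes
   C0/C2/C3, or onto ZF/PF/CF for the fcomi variants below. */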
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = cc_table[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = cc_table[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

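/* Propagate the x87 control word into the softfloat status: bits 10-11
   select the rounding mode, bits 8-9 the rounding precision (32, 64 or
   80 bits). */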
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
    FORCE_RET();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

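/* FBLD/FBST use the 80-bit packed BCD format: nine bytes holding two
   decimal digits each, least significant byte first, followed by a
   sign byte (0x80 = negative). */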
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);         /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

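/* Partial remainder helpers.  When the exponent difference is too large
   for a single step (>= 53 here, since the host math is done in double
   precision), only a partial reduction is performed and C2 is set so
   that software can iterate the instruction. */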
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp(ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /* Infinity */;
        else
            env->fpus |= 0x100 /* NaN */;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /* Zero */;
        else
            env->fpus |= 0x4400 /* Denormal */;
    } else {
        env->fpus |= 0x400;
    }
}

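/* FSTENV/FLDENV images use a two-bit tag per register (00 valid,
   01 zero, 10 special, 11 empty); the full tag word is rebuilt from the
   internal one-bit fptags[] plus the register contents. */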
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

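/* FXSAVE/FXRSTOR layout: x87 control/status/tag in the header, the ST
   registers at offset 0x20 spaced 16 bytes apart, the XMM registers at
   offset 0xa0.  The tag word is stored in its abridged one-bit form
   (1 = valid), hence the XOR with 0xff. */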
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for (i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for (i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

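/* Conversion between the 80-bit extended format (explicit integer bit,
   15-bit exponent biased by 16383) and the host representation used
   for CPU86_LDouble. */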
#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

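/* 128/64 -> 64 bit unsigned division by shift-and-subtract, one
   quotient bit per iteration; returns nonzero if the quotient does not
   fit in 64 bits. */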
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

void helper_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->hflags |= HF_HALTED_MASK;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}

void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

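/* Instantiate the softmmu load/store helpers for each access size
   (SHIFT 0..3 = 8/16/32/64 bit). */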
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#ifdef __s390__
# define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
#else
# define GETPC() (__builtin_return_address(0))
#endif

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}

/* Secure Virtual Machine helpers */

void helper_stgi(void)
{
    env->hflags |= HF_GIF_MASK;
}

void helper_clgi(void)
{
    env->hflags &= ~HF_GIF_MASK;
}

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(void)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(void)
{
}
void helper_vmsave(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(void)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

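/* The VMCB packs segment attributes into 12 bits; expand them to the
   CPU's cached descriptor flag format and back. */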
static inline uint32_t
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
{
    return    ((vmcb_attrib & 0x00ff) << 8)          /* Type, S, DPL, P */
            | ((vmcb_attrib & 0x0f00) << 12)         /* AVL, L, DB, G */
            | ((vmcb_base >> 16) & 0xff)             /* Base 23-16 */
            | (vmcb_base & 0xff000000)               /* Base 31-24 */
            | (vmcb_limit & 0xf0000);                /* Limit 19-16 */
}

static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
{
    return    ((cpu_attrib >> 8) & 0xff)             /* Type, S, DPL, P */
            | ((cpu_attrib & 0xf00000) >> 12);       /* AVL, L, DB, G */
}

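/* VMRUN: save the host state to the hsave area, load the guest state
   and the intercept bitmaps from the VMCB pointed to by EAX, optionally
   inject a pending event, then resume execution in the guest. */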
void helper_vmrun(void)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    /* We shift all the intercept bits so we can OR them with the TB
       flags later on */
    env->intercept            = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = int_ctl & V_TPR_MASK;
        cpu_set_apic_tpr(env, env->cr[8]);
        if (env->eflags & IF_MASK)
            env->hflags |= HF_HIF_MASK;
    }

#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;
    CC_DST = 0xffffffff;

    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
            break;
    }

    helper_stgi();

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
    if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    cpu_loop_exit();
}

void helper_vmmcall(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmmcall!\n");
}

void helper_vmload(void)
{
    target_ulong addr;
    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_LOAD_SEG2(addr, segs[R_FS], fs);
    SVM_LOAD_SEG2(addr, segs[R_GS], gs);
    SVM_LOAD_SEG2(addr, tr, tr);
    SVM_LOAD_SEG2(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(void)
{
    target_ulong addr;
    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_SAVE_SEG(addr, segs[R_FS], fs);
    SVM_SAVE_SEG(addr, segs[R_GS], gs);
    SVM_SAVE_SEG(addr, tr, tr);
    SVM_SAVE_SEG(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_skinit(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "skinit!\n");
}

void helper_invlpga(void)
{
    tlb_flush(env, 0);
}

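/* Check whether the intercept for the given exit code is enabled and
   trigger a #VMEXIT if so.  MSR accesses consult the MSR permission
   bitmap: two bits per MSR, in three 2K ranges starting at MSR 0,
   0xc0000000 and 0xc0010000. */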
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
        if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
        if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
        if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_IOIO:
        break;

    case SVM_EXIT_MSR:
        if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

/* Note: currently only 32 bits of exit_code are used */
5063
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5064
{
5065
    uint32_t int_ctl;
5066

    
5067
    if (loglevel & CPU_LOG_TB_IN_ASM)
5068
        fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5069
                exit_code, exit_info_1,
5070
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5071
                EIP);
5072

    
5073
    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
        int_ctl &= ~V_TPR_MASK;
        int_ctl |= env->cr[8] & V_TPR_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
    }

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags &= ~HF_HIF_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
        cpu_set_apic_tpr(env, env->cr[8]);
    }
    /* we need to set EFER after the CRs so the hidden flags get set properly */
#ifdef TARGET_X86_64
    env->efer  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
#endif

    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    SVM_LOAD_SEG(env->vm_hsave, ES, es);
    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
    SVM_LOAD_SEG(env->vm_hsave, DS, ds);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    helper_clgi();
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
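/* Entering MMX state resets the FP stack top and marks all eight FPU
   tags as valid (0); EMMS marks them all empty (1). */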
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(uint64_t *d, uint64_t *s)
{
    *d = *s;
}

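/* Instantiate the vector helpers from ops_sse.h twice: SHIFT 0 builds
   the 64-bit MMX variants, SHIFT 1 the 128-bit SSE variants. */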
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

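/* Instantiate the size-parameterized helpers from helper_template.h
   (including the compute_all_* and compute_c_* condition code functions
   referenced by cc_table below) for 8, 16 and 32 bit operands, plus
   64 bit on x86_64. */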
#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
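/* Loop-based bit scans.  Both helpers assume a non-zero source: BSF/BSR
   leave the destination undefined (and set ZF) when the source is 0,
   and the loops below would not terminate for it. */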
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}

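/* For CC_OP_EFLAGS the flags have already been computed and are stored
   directly in CC_SRC. */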
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

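/* Lazy condition code evaluation: for each CC_OP value, a pair of
   functions computing the full flag set and just the carry flag from
   the saved operands. */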
CCTable cc_table[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = { /* should never happen */ },

    [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },

    [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
    [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
    [CC_OP_MULL] = { compute_all_mull, compute_c_mull },

    [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
    [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
    [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },

    [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
    [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
    [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },

    [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
    [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
    [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },

    [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
    [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
    [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },

    [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
    [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
    [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },

    [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
    [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
    [CC_OP_INCL] = { compute_all_incl, compute_c_incl },

    [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
    [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
    [CC_OP_DECL] = { compute_all_decl, compute_c_incl },

    [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
    [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
    [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },

    [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
    [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
    [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },

#ifdef TARGET_X86_64
    [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },

    [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },

    [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },

    [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },

    [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },

    [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },

    [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },

    [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },

    [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },

    [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
#endif
};