/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
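
/* parity_table[b] is CC_P exactly when b has an even number of set
   bits: the x86 PF flag is defined as even parity of the low 8 bits
   of a result.  A sketch of an equivalent run-time computation, kept
   for documentation only and never compiled (it assumes a GCC-style
   __builtin_popcount): */
#if 0
static uint8_t compute_parity_flag(uint8_t b)
{
    /* PF is set when the number of 1 bits in the low byte is even */
    return (__builtin_popcount(b) & 1) == 0 ? CC_P : 0;
}
#endif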

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
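
/* RCL rotates through CF, i.e. over width+1 bit positions, so the
   effective count of an 8-bit RCL is the masked count modulo 9 and
   that of a 16-bit RCL modulo 17; the tables above fold the 5-bit
   masked shift count (0..31) accordingly. */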

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
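
/* Descriptor words as used above: e1 is the low 32 bits and e2 the
   high 32 bits of an 8-byte descriptor.  The 32-bit base is scattered
   over e1[31:16], e2[7:0] and e2[31:24]; the 20-bit limit lives in
   e1[15:0] and e2[19:16] and is scaled to 4K granularity when
   DESC_G_MASK is set, which is what get_seg_limit() reconstructs. */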

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
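
/* TSS type encoding: 1/3 are the available/busy 16-bit TSS and 9/11
   the 32-bit variants, so bit 3 of the type (the shift above) selects
   the layout and doubles every field stride: the per-privilege stack
   entry {SP,SS} starts at (dpl * 4 + 2) << shift. */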

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
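
/* The I/O bitmap test above reads two bytes because an access of up
   to 4 ports may straddle a byte boundary in the bitmap: the loaded
   word is shifted right by (addr & 7), and every bit covering the
   access must be clear for the I/O to be allowed. */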

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
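
/* Usage note for the macros above: with a 16-bit stack segment
   sp_mask is 0xffff, so only the low 16 bits of the stack pointer
   move and pushes wrap inside the 64K segment; with a 32-bit stack
   sp_mask is 0xffffffff and SEG_ADDL additionally truncates the
   base+offset sum to 32 bits so that the linear address cannot carry
   above bit 31 when target_ulong is 64 bits wide. */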

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    
765
    shift = type >> 3;
766

    
767
#if 0
768
    /* XXX: check that enough room is available */
769
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
770
    if (env->eflags & VM_MASK)
771
        push_size += 8;
772
    push_size <<= shift;
773
#endif
774
    if (shift == 1) {
775
        if (new_stack) {
776
            if (env->eflags & VM_MASK) {
777
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
778
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
779
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
780
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
781
            }
782
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
783
            PUSHL(ssp, esp, sp_mask, ESP);
784
        }
785
        PUSHL(ssp, esp, sp_mask, compute_eflags());
786
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
787
        PUSHL(ssp, esp, sp_mask, old_eip);
788
        if (has_error_code) {
789
            PUSHL(ssp, esp, sp_mask, error_code);
790
        }
791
    } else {
792
        if (new_stack) {
793
            if (env->eflags & VM_MASK) {
794
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
795
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
796
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
797
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
798
            }
799
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
800
            PUSHW(ssp, esp, sp_mask, ESP);
801
        }
802
        PUSHW(ssp, esp, sp_mask, compute_eflags());
803
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
804
        PUSHW(ssp, esp, sp_mask, old_eip);
805
        if (has_error_code) {
806
            PUSHW(ssp, esp, sp_mask, error_code);
807
        }
808
    }
809

    
810
    if (new_stack) {
811
        if (env->eflags & VM_MASK) {
812
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
813
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
814
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
815
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
816
        }
817
        ss = (ss & ~3) | dpl;
818
        cpu_x86_load_seg_cache(env, R_SS, ss,
819
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
820
    }
821
    SET_ESP(esp, sp_mask);
822

    
823
    selector = (selector & ~3) | dpl;
824
    cpu_x86_load_seg_cache(env, R_CS, selector,
825
                   get_seg_base(e1, e2),
826
                   get_seg_limit(e1, e2),
827
                   e2);
828
    cpu_x86_set_cpl(env, dpl);
829
    env->eip = offset;
830

    
831
    /* interrupt gate clear IF mask */
832
    if ((type & 1) == 0) {
833
        env->eflags &= ~IF_MASK;
834
    }
835
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
836
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif
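
/* Note on the 64-bit path above: long mode IDT entries are 16 bytes
   wide (hence intno * 16), SS:RSP is always pushed regardless of
   whether the privilege level changes, and a non-zero IST field in
   the gate forces a stack switch through the 64-bit TSS even at the
   same privilege level. */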

#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
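
/* MSR layout assumed by helper_syscall/helper_sysret: STAR[47:32]
   holds the kernel CS selector (the kernel SS is that value + 8),
   STAR[63:48] the user selector base used by SYSRET, LSTAR/CSTAR the
   64-bit and compatibility-mode entry points, and env->fmask (the
   SFMASK MSR) the set of RFLAGS bits cleared on SYSCALL in long
   mode. */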

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
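
/* The rules above follow the x86 double-fault table: #DE (0) and
   #TS/#NP/#SS/#GP (10-13) are "contributory" exceptions; two
   contributory faults in sequence, or a page fault followed by a
   contributory fault or another page fault, escalate to #DF (8),
   and any further fault while delivering #DF is a triple fault. */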

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */

/* division, flags are undefined */
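
/* Note: on x86 a single #DE fault covers both division by zero and a
   quotient that does not fit in the destination register; e.g. an
   8-bit DIV with AX = 0x1000 and divisor 2 overflows AL and faults. */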
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */
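
/* Example: AAM (base 10) with AL = 123 yields AH = 12, AL = 3, i.e. it
   splits AL into decimal digits; AAD is the inverse, computing
   AL = AH * base + AL and clearing AH. */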
/* XXX: AAM with a zero immediate should raise #DE (divide error);
   this is not implemented here */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
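
/* CMPXCHG8B: the memory operand is written back even when the compare
   fails, matching real hardware where the locked cycle always performs
   a write; success is reported through ZF only. */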
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}

void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}
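
/* ENTER with a nonzero nesting level copies the enclosing frame
   pointers from the old frame before pushing the new one: the loop
   below runs level-1 times and the final push stores the current frame
   pointer (t1). */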
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
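
/* In long mode, system descriptors (LDT, TSS) grow to 16 bytes so they
   can hold a 64-bit base, hence entry_limit 15 instead of 7 below and
   the extra high-base dword read at ptr + 8. */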
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
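
/* A far JMP never changes CPL: through a call gate the target code
   segment must be conforming with DPL <= CPL, or nonconforming with
   DPL == CPL, so no stack switch is involved (unlike a far CALL). */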
/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
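
/* A far CALL through a call gate to a more privileged level switches to
   the stack given by the TSS for the target DPL and copies param_count
   words (dwords for a 386 gate) from the old stack to the new one
   before pushing the return CS:EIP. */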
/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
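
/* The frame popped here is EIP, CS [, EFLAGS for IRET]; when returning
   to an outer privilege level it is followed by ESP and SS, and for a
   return to vm86 mode additionally by ES, DS, FS and GS. */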
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
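
/* SYSENTER loads CS and SS as flat segments derived from the
   MSR_IA32_SYSENTER_CS value (the SS selector is CS + 8) and jumps to
   SYSENTER_EIP; no return address is saved, so the caller is expected
   to stash it before executing the instruction. */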
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
#endif

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
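
/* WRMSR/RDMSR transfer the 64-bit MSR value in EDX:EAX, with ECX
   selecting the MSR; unknown MSRs are silently ignored here (reads
   return 0) instead of raising #GP as real hardware does. */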
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
#ifdef USE_KQEMU
    case MSR_QPI_COMMBASE:
        if (env->kqemu_enabled) {
            val = kqemu_comm_base;
        } else {
            val = 0;
        }
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif
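
/* LSL/LAR/VERR/VERW never fault on a bad selector: they report success
   or failure through ZF, so every error path below clears ZF and
   returns instead of raising an exception. */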

target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}
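
/* Note (added): fpu_set_exception() sets the requested status bits and, if
   any pending exception is unmasked in the control word, also latches the
   summary and busy bits (FPUS_SE | FPUS_B).  The deferred fault is then
   delivered by fpu_raise_exception() below: as #MF (exception 16) when
   CR0.NE is set, otherwise through the external FERR line. */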

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}
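
/* Note (added): in the fist/fistt store helpers below, a converted value
   that does not fit in 16 bits is clamped to -32768 (0x8000), the x87
   "integer indefinite" result for a masked invalid conversion. */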

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
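
/* Note (added): floatx_compare() returns -1/0/1 for less/equal/greater and
   2 for unordered, so fcom_ccval[ret + 1] selects the C0/C2/C3 patterns
   0x0100 (less), 0x4000 (equal), 0x0000 (greater) and 0x4500 (unordered)
   that the fcom helpers merge into the status word. */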

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}
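
/* Note (added): update_fp_status() below propagates two control word fields
   into softfloat: bits 10-11 (RC) select the rounding mode and, when an
   80-bit float type is available, bits 8-9 (PC) select 32/64/80-bit rounding
   precision.  E.g. a control word of 0x0f7f rounds toward zero (RC=3) at
   full 80-bit precision (PC=3). */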

static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */
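
/* Note (added): fbld/fbst use the 80-bit packed BCD format: bytes 0-8 hold
   18 decimal digits, two per byte with the low nibble less significant, and
   bit 7 of byte 9 is the sign.  E.g. -123 is stored, most significant byte
   first, as 80 00 00 00 00 00 00 00 01 23. */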

void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);       /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}
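
/* Note (added): fxtract below decomposes ST0 = m * 2^e, replacing ST0 with
   the unbiased exponent e and pushing the significand m (exponent rebiased
   to zero, so 1.0 <= |m| < 2.0) on top: for ST0 = 12.0 it leaves ST1 = 3.0
   and ST0 = 1.5. */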

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
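
/* Note (added): helper_fprem below differs from helper_fprem1 above only in
   the quotient rounding (chop vs. round-to-nearest).  Both compute a
   "partial" remainder: when the exponents differ by 53 or more, only a
   partial reduction is performed and C2 is set, so the guest is expected to
   keep executing the instruction until C2 clears. */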

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp(ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}
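
/* Note (added): fxam reports the class of ST0 through C3/C2/C0 in the
   status word: 0x100 = NaN, 0x400 = normal finite, 0x500 = infinity,
   0x4000 = zero, 0x4400 = denormal; C1 (0x200) holds the sign. */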

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}
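
/* Note (added): fstenv/fldenv use the packed 16-bit tag word, two bits per
   register: 00 = valid, 01 = zero, 10 = special (NaN/infinity/denormal),
   11 = empty.  env->fptags[] only tracks empty/non-empty, so fstenv
   reconstructs the finer classes from the register contents while fldenv
   collapses the tags back to the empty bit. */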

void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}
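
/* Note (added): fxsave/fxrstor below use the 512-byte FXSAVE image:
   control/status words first, a one-bit-per-register tag byte at offset 4
   (set for non-empty registers; env->fptags[] stores the opposite sense,
   hence the ^ 0xff), the eight x87/MMX registers in 16-byte slots from
   offset 0x20, and the XMM registers from offset 0xa0.  mxcsr and the XMM
   state are only transferred when CR4.OSFXSR is set. */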

void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel  */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel  */
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}
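
/* Note (added): when the host lacks a native 80-bit long double
   (!USE_X86LDOUBLE), cpu_get_fp80/cpu_set_fp80 below convert between the
   guest's extended format (64-bit mantissa with explicit integer bit,
   15-bit exponent biased by 16383) and a host double, dropping or
   re-deriving the low mantissa bits as needed. */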

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}
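
/* Note (added): div64 below does 128/64 -> 64 division with the textbook
   shift-and-subtract loop: each of the 64 iterations shifts one dividend
   bit into the partial remainder a1 and one quotient bit into a0.  An
   unrepresentable quotient is exactly the a1 >= b entry condition, which
   the callers turn into the #DE that the DIV instruction architecturally
   raises on overflow. */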

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}


/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else
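
/* Note (added): the VMCB keeps segment attributes in the compressed 12-bit
   SVM encoding while the internal SegmentCache keeps the descriptor flag
   layout; svm_save_seg/svm_load_seg below convert by moving bits 8-15 and
   20-23 of the cached flags into/out of the 16-bit attrib field. */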

static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
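
/* Note (added): helper_vmrun below performs the SVM world switch: it
   snapshots the host state into the hsave page, caches the intercept
   bitmaps, loads the guest state from the VMCB at EAX, sets GIF and finally
   injects any pending event from control.event_inj; helper_vmexit() later
   performs the mirror-image switch back to the host. */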

void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                /* XXX: is it always correct ? */
                do_interrupt(vector, 0, 0, 0, 1);
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = EXCP02_NMI;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                cpu_loop_exit();
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
}

void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}

void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}
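
/* Note (added): for MSR intercepts the permission map holds two bits per
   MSR (read intercept, then write intercept) in three 2KB regions covering
   MSRs 0-0x1fff, 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff.  The
   switch below turns ECX into a byte index t1 and bit offset t0 into that
   map; param selects the read (0) or write (1) bit. */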
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                /* two bits per MSR: the bit offset is 2 * ECX */
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
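
/* The I/O permission map (IOPM) holds one intercept bit per port, so
   an access of N bytes has to test the N consecutive bits starting at
   its port number; that is why a 16-bit word is loaded even though at
   most 4 bits are tested.  (param >> 4) & 7 is the access size in
   bytes as encoded by the translator. */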
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
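
/* #VMEXIT: store exit_code/exit_info_1 and the complete guest state
   into the current VMCB, then reload the host state saved by VMRUN
   from vm_hsave and resume execution in the host via cpu_loop_exit().
   The commented steps below follow the #VMEXIT description in the AMD
   APM vol. 2. */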
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}
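
/* fptags[i] == 1 marks ST(i) as empty, 0 as valid: helper_enter_mmx
   above tags all eight registers valid (MMX state), while EMMS below
   returns them all to the empty state with two byte-wise 32-bit
   stores. */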
void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}
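
/* ops_sse.h is expanded twice: SHIFT 0 builds the 64-bit MMX variants
   of the vector helpers and SHIFT 1 the 128-bit SSE variants.
   helper_template.h is expanded once per operand width (SHIFT 0..3 for
   8/16/32/64 bit) to instantiate width-specific helpers, among them
   the compute_all_* and compute_c_* condition-code functions used
   below. */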
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
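/* helper_bsf/helper_bsr loop until a set bit is found, so they must
   only be called with a non-zero source operand; the translator checks
   for zero and skips the helper call in that case. */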
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}
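
/* Lazy condition-code evaluation: CC_OP records which operation last
   set the flags, with its operands/result kept in CC_SRC/CC_DST, and
   the compute_{all,c}_* helpers rebuild EFLAGS (or just CF) on demand.
   For CC_OP_EFLAGS the flags are already materialized in CC_SRC. */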
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}
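
/* When only CF is wanted, one narrow helper often serves all widths:
   INC/DEC do not modify CF so CC_SRC simply holds the saved carry, the
   carry of SAR is the last bit shifted out (low bit of CC_SRC) at any
   width, and the carry of MUL is just CC_SRC != 0. */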
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}